Merge "ARM: dts: msm: Early mount of vendor partition for Kona"
diff --git a/Documentation/arm/msm/glink_pkt.txt b/Documentation/arm/msm/glink_pkt.txt
new file mode 100644
index 0000000..c6c7740
--- /dev/null
+++ b/Documentation/arm/msm/glink_pkt.txt
@@ -0,0 +1,196 @@
+Introduction
+============
+
+Glink packet drivers are companion adaptation drivers which use the kernel APIs
+to expose the Glink core logical channels as character devices to the
+user-space clients.
+
+The Glink core APIs are detailed in Documentation/arm/msm/glink.txt.
+
+Software description
+====================
+
+Glink packet drivers support the Glink core APIs to user-space clients through
+standard file operations like open, read, write, ioctl, poll and release etc.
+The standard Linux permissions are used for the device node and SELinux does
+further security checks.
+
+
+ Device node [0..n]
+ |
+ |
+ -------------------
+ | VFS Framework |
+ -------------------
+ | |
+ | |
+ ------- -------
+ | CDEV | | CDEV |
+ | Dev 0 |...| Dev n |
+ ----------------------
+| Glink packet driver |
+ ----------------------
+ |
+ |
+ -----------------
+ | |
+ | G-Link core |
+ | |
+ -----------------
+ |
+ |
+ To Remote System
+
+
+The file operations map to the G-link client API as follows:
+
+Open():
+----------
+The Open system call is mapped to glink_open() which opens a channel. The
+expected channel configuration has to be done through DT files. The full DT
+schema is detailed in Documentation/devicetree/bindings/arm/msm/glinkpkt.txt.
+
+Open on the glink packet character device is a blocking call which blocks until
+the channel is fully open by both local processor and remote processor.
+Clients can configure the blocking time through a device configurable parameter
+defined per device.
+
+The timeout value is specified in seconds with a default timeout of 1 second.
+A negative value indicates an infinite wait.
+
+Example:
+# get open timeout value
+ cat /sys/class/glinkpkt/device_name/open_timeout
+# set to 20 seconds value
+ echo 20 > /sys/class/glinkpkt/device_name/open_timeout
+
+If the channel is not opened by the remote processor, or any other problem
+prevents the channel from becoming ready, the open will time out and -ETIMEDOUT
+will be returned to the client. On success, open returns a valid file
+descriptor to the client; on failure, a standard Linux error code.
+
+The same device can be opened by multiple clients but passing the same file
+descriptor from multiple threads may lead to unexpected results.
+
+Write():
+----------
+The Write system call is mapped to glink_tx() which transmits the data over the
+glink channel.
+
+Read():
+----------
+The Read system call consumes any pending data on the channel. Glink signals
+incoming data through the glink_notify_rx() call back and the glink packet
+driver queues the data internally and provides to client through read system
+call. Once the Read is completed, the glink packet driver calls glink_rx_done()
+API to notify the completion of receiving operation to Glink core.
+
+ +
+ User-Space | Kernel-Space
+ |
+ |
++---------+ | +----------+ +------------+
+| Local | | | GlinkPKT | | |
+| Client | | | Driver | | Glink core |
+| | | | | | |
++---------+ | +----------+ +------------+
+ |
+ + | + +
+ | | | |
+ | open() | glink_pkt_open() | glink_open() |
+ | +--------------> | +-----------------> | +-----------------> |
+ | | | |
+ | File Handle[fd]| Valid Fd | Handle |
+ | <--------------+ | <-----------------+ | <-----------------+ |
+ | | | |
+ | Ioctl() | | |
+ | QUEUE_RX_INTENT | glink_pkt_ioctl() | glink_queue_rx_intent()
+ | +--------------> | +-----------------> | +-----------------> |
+ | | | |
+ | | | |
+ | <----------------------------------------------------------+ |
+ | | | |
+ | Read() | glink_pkt_read() | |
+ | +--------------> | +-----------------> | +---+ |
+ | | | | |
+ | | Wait for data |
+ | | | <---+ |
+ | | | glink_notify_rx() |
+ | | | <-----------------+ |
+ | | Wake-up read() | |
+ | | copy_to_user() | |
+ | read() return | <-----------------+ | |
+ | <--------------+ | | |
+ + | + +
+ |
+ |
+ +
+
+Clients can also poll on device node for POLLIN mask to get notification for
+any incoming data. Clients have to call the GLINK_PKT_IOCTL_QUEUE_RX_INTENT
+ioctl to queue the RX buffer to glink core in advance.
+
+Release():
+----------
+The Release system call is mapped to glink_close() to close a channel and free
+the resources.
+
+Poll():
+----------
+The Poll system call provides waiting operation like wait for incoming data on
+POLLIN mask and to get the TIOCM signal notification on POLLPRI mask. Clients
+can wait on poll for POLLPRI mask to get any notification regarding TIOCM
+signals. In SSR case Poll call will return with POLLHUP mask and in this case
+client has to close and re-open the port.
+
+* POLLPRI - TIOCM bits changed
+* POLLIN - RX data available
+* POLLHUP - link is down due to either remote side closing or an SSR
+
+Ioctl():
+----------
+Multiple ioctls are supported to get the TIOCM signal status and to queue the
+Rx intent with Glink core. Supported ioctls are TIOCMSET, TIOCMGET, TIOCMBIS,
+TIOCMBIC and GLINK_PKT_IOCTL_QUEUE_RX_INTENT.
+
+The GLINK_PKT_IOCTL_QUEUE_RX_INTENT ioctl is mapped to glink_queue_rx_intent()
+API which queues an RX intent with Glink core.
+
+Signals:
+==========
+Glink protocol provides a 32-bit control signal field to pass through for the
+client-specific signaling, whereas Glink packet driver clients which are from
+user space can use the signal field as mentioned below.
+
+* 31:28 - Reserved for SMD RS-232 signals
+* 27:16 - Pass through for client usage
+* 15:0  - TIOCM bits
+
+SSR operation:
+==============
+On remote subsystem restart all open channels on that edge will be closed and
+local clients have to close and re-open the channel to re-start the
+communication. All blocking calls such as open, read and write will be returned
+with -ENETRESET and the poll call will be return with the POLLHUP error codes.
+
+Files:
+==========
+Documentation/devicetree/bindings/arm/msm/glinkpkt.txt
+drivers/soc/qcom/msm_glink_pkt.c
+
+Wakelock:
+==========
+By default, GLINK PKT will acquire a wakelock for 2 seconds. To optimize this
+behavior, use the poll() function:
+ 1. Client calls poll() which blocks until data is available to read
+ 2. Data comes in, GLINK PKT grabs a wakelock and poll() is unblocked
+ 3. Client grabs wakelock to prevent system from suspending
+ 4. Client calls GLINK PKT read() to read the data
+ 5. GLINK PKT releases its wakelock
+ 6. Client Processes the data
+ 7. Client releases the wakelock
+
+Logging:
+==========
+ cat /d/ipc_logging/glink_pkt/log_cont
+
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index d13420c..60cd17b 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -184,6 +184,23 @@
* qcom,inst-id: must be present. QMI instance id for remote ETMs.
+* Optional properties for funnels:
+
+ * qcom,duplicate-funnel: boolean, indicates it is a duplicate of an
+ existing funnel. Funnel devices are now capable of supporting
+ multiple-input and multiple-output configuration with in built
+ hardware filtering for TPDM devices. Each set of input-output
+ combination is treated as independent funnel device.
+ funnel-base-dummy and funnel-base-real reg-names must be specified
+ when this property is enabled.
+
+ * reg-names: funnel-base-dummy: dummy register space used by a
+ duplicate funnel. Should be a valid register address space that
+ no other device is using.
+
+ * reg-names: funnel-base-real: actual register space for the
+ duplicate funnel.
+
Example:
1. Sinks
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
new file mode 100644
index 0000000..2c23b4b
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
@@ -0,0 +1,270 @@
+MSM Bus Devices
+
+The bus devices (fabrics/NoCs) are the interconnects between various
+components on chipsets. These devices form the backbone of the chip
+topology. Entire topology of the chipset is built using the
+device-tree data of these bus devices.
+
+To add the bus devices following properties are required:
+
+compatible: The bus devices need to be compatible with
+ msm-bus-fabric
+cell-id: A 32 bit integer unique per bus per chipset. The IDs
+ for buses are in multiples of 1024.
+label: Bus name
+qcom,fabclk-dual: Dual set (active/sleep) bus clock name
+qcom,fabclk-active: Active set bus clock name
+qcom,nfab: An integer property which specifies the total number
+ of buses on the chipset.
+
+The following properties are optional as a bus might not support
+these features:
+
+qcom,ntieredslaves: Number of tiered slaves on the bus.
+qcom,qos-freq: QoS frequency (In KHz)
+qcom,hw-sel: A string which decides whether QoS data
+ should be sent to RPM, set using BIMC or NoCs.
+ It can be set to "RPM", "NoC" or "BIMC".
+qcom,qos-baseoffset: Base address offset of QoS registers from the bus device
+ base address.
+qcom,qos-delta: Address delta between QoS registers of different masters.
+qcom,rpm-en: A boolean flag indicating whether RPM transactions are
+ supported for nodes of the bus.
+qcom,ahb: A boolean flag indicating whether the bus is ahb type.
+qcom,virt: A boolean property indicating this is a virtual bus.
+reg: Register space of the bus device. Not required in case
+ the bus is virtual.
+qcom,nr-lim-thresh	The threshold below which to apply throttling of non
+ real time masters.
+qcom,eff-fact		The DDR efficiency factor to be assumed. This only
+ comes into play for buses that connect to the DDR.
+
+
+The following properties are optional as collecting data via coresight might
+not be supported for every bus. The documentation for coresight properties
+can be found in:
+Documentation/devicetree/bindings/coresight/coresight.txt
+
+coresight-id		Unique integer identifier for the bus.
+coresight-name Unique descriptive name of the bus.
+coresight-nr-inports Number of input ports on the bus.
+coresight-outports List of output port numbers on the bus.
+coresight-child-list List of phandles pointing to the children of this
+ component.
+coresight-child-ports List of input port numbers of the children.
+
+
+Any interconnect on the bus is represented as a child node.
+A child node can be of type: master, slave or a gateway.
+A gateway is an interconnect between buses and can be of both
+master and slave type.
+
+The following properties are available to characterize a child node.
+The properties can be chosen depending on the type of child node.
+
+cell-id: For a master the ID is between 0 - 512
+ For a slave the ID is between 512 - 1024
+label: Name of the master/slave/gateway
+qcom,masterp: Hardware master port number(s)
+qcom,tier: The tier to which a master/slave belongs.
+ Note that tiering might not be supported on
+ all architectures.
+qcom,hw-sel: A string which decides whether QoS data should be sent
+ to RPM, set using BIMC or NoCs.
+ It can be set to "RPM", "NoC" or "BIMC".
+qcom,mode: Used for masters on NoC/BIMC. Indicates which of the
+ four modes (Fixed/Limiter/Bypass/Regulator) the master
+ belongs to.
+qcom,perm-mode: Permissible mode switches. Indicates which of the four
+ modes are supported of the master node. Generally,
+ modes are set at boot-up and not switched at run-time.
+qcom,qport: QoS port number. This can be different from the
+ master-port number.
+qcom,ws: Window size (in Hz), used for NoC/BIMC masters to
+ calculate saturation values.
+qcom,mas-hw-id: A unique hardware ID agreed upon by processors across
+ the system. This ID is assigned to every master. It can
+ be used to send master specific data from
+ Apps/Modem/LPASS to RPM.
+qcom,slv-hw-id:		A unique hardware ID agreed upon by processors across
+			the system. This ID is assigned to every slave. It can
+			be used to send slave specific data from
+			Apps/Modem/LPASS to RPM.
+qcom,slaveclk-dual:	Dual set (active/sleep) slave clock name
+qcom,slaveclk-active:	Active set slave clock name
+qcom,gateway: Flag indicating whether a particular node is a gateway.
+qcom,slavep: Hardware slave port number(s).
+qcom,buswidth: Width of the interconnect between a node and the bus.
+ (In Bytes).
+qcom,prio-rd: Read priority for a BIMC bus master (Can be 0/1/2)
+qcom,prio-wr: Write priority for a BIMC bus master (Can be 0/1/2)
+qcom,prio0: Priority low signal for a NoC bus master
+ (Can be 0/1/2).
+qcom,prio1: Priority high signal for a NoC bus master
+ (Can be 0/1/2)
+qcom,dual-conf: Indicates whether a BIMC/NoC master can be configured
+ in multiple modes at run-time. (Boolean)
+qcom,mode-thresh: Threshold mode for a BIMC/NoC master. Beyond a certain
+ threshold frequency, a threshold mode can be used.
+ (Can be Fixed/Limiter/Bypass/Regulator)
+qcom,bimc,bw: Bandwidth limit for a BIMC master using dual modes.
+ This bandwidth is used to calculate Grant count and
+ other parameters used in Limiter and Regular mode.
+ for static BKE configuration. It is defined in KBytes/s.
+qcom,bimc,gp: Grant Period for configuring a master in limiter
+ mode. This is an integer value in nano-seconds.
+qcom,bimc,thmp: Medium threshold percentage for BIMC masters.
+ This percentage is used to calculate medium threshold
+ value for BIMC Masters in Limiter mode for static
+ configuration. This can be any integer value between
+ 1 and 100.
+qcom,thresh: Beyond this threshold frequency, the mode usage is
+ switched from mode specified by property qcom,mode
+ to the one specified by qcom,mode-thresh. These thresholds
+ can be setup in increasing order of thresholds, so the
+ requested IB is evaluated at each threshold level before
+ making the decision to switch QoS modes and applying the
+			corresponding qcom,bimc,bw limiting bw as needed.
+ This is specified in KBytes/s.
+qcom,rt-mas: Indicates if a master node is a realtime master with
+ hard deadlines.
+qcom,nr-lim: Indicates that this is non-real time master which can
+ be throttled in case of concurrent scenarios.
+qcom,floor-bw: Represents the floor bandwidth below which this master
+ cannot be throttled. This floor bandwidth is specified in
+ KBytes/s.
+qcom,ff: The fudge factor used by clients when voting for
+ bandwidth from the node.
+qcom,bcm-name: The name used to fetch details about the bcm device from
+ the command DB driver.
+qcom,drv-id: The DRV id associated with the RSC, used to differentiate
+ between RSCS owned by different execution environments.
+qcom,defer-init-qos: Flag to force defer initial QoS configuration at probe time.
+qcom,sbm-offset: The offset used to determine location of Sideband
+ Manager used in the disconnect mechanism when clients
+ remove bandwidth votes.
+qcom,disable-ports: The ports to disable on the sideband manager when the
+ requirement bandwidth affecting the node reduces to 0.
+node-reg-names: Names of the regulator associated with bus node used
+ to grab the phandle of the regulator.
+
+Example:
+
+
+ msm-mmss-noc@fc478000 {
+ compatible = "msm-bus-fabric";
+ reg = <0xfc478000 0x00004000>;
+ cell-id = <2048>;
+ label = "msm_mmss_noc";
+ qcom,fabclk-dual = "bus_clk";
+ qcom,fabclk-active = "bus_a_clk";
+ qcom,ntieredslaves = <0>;
+ qcom,qos-freq = <4800>;
+ qcom,hw-sel = "NoC";
+ qcom,rpm-en;
+ qcom,nfab = <6>;
+ qcom,sbm-offset = <20000>;
+
+ mas-gfx3d {
+ cell-id = <26>;
+ label = "mas-gfx3d";
+ qcom,masterp = <2 3>;
+ qcom,tier = <2>;
+ qcom,hw-sel = "NoC";
+ qcom,perm-mode = "Bypass";
+ qcom,mode = "Bypass";
+ qcom,ws = <10000>;
+ qcom,qport = <2 3>;
+ qcom,mas-hw-id = <6>;
+ qcom,disable-ports = <1 2>;
+ };
+
+ mas-jpeg {
+ cell-id = <62>;
+ label = "mas-jpeg";
+ qcom,masterp = <4>;
+ qcom,tier = <2>;
+ qcom,hw-sel = "NoC";
+ qcom,perm-mode = "Bypass";
+ qcom,mode = "Bypass";
+ qcom,qport = <0>;
+ qcom,ws = <10000>;
+ qcom,mas-hw-id = <7>;
+ };
+ };
+
+ msm-bimc@0xfc380000 {
+ compatible = "msm-bus-fabric";
+ reg = <0xfc380000 0x0006A000>;
+ cell-id = <0>;
+ label = "msm_bimc";
+ qcom,fabclk-dual = "mem_clk";
+ qcom,fabclk-active = "mem_a_clk";
+ qcom,ntieredslaves = <0>;
+ qcom,qos-freq = <19200>;
+ qcom,hw-sel = "BIMC";
+ qcom,rpm-en;
+
+ coresight-id = <55>;
+ coresight-name = "coresight-bimc";
+ coresight-nr-inports = <0>;
+ coresight-outports = <0>;
+ coresight-child-list = <&funnel_in1>;
+ coresight-child-ports = <3>;
+
+ mas-ampss-m0 {
+ cell-id = <1>;
+ label = "mas-ampss-m0";
+ qcom,masterp = <0>;
+ qcom,tier = <2>;
+ qcom,hw-sel = "BIMC";
+ qcom,mode = "Limiter";
+ qcom,qport = <0>;
+ qcom,ws = <10000>;
+ qcom,mas-hw-id = <0>;
+ qcom,prio-rd = <0>;
+ qcom,prio-wr = <0>;
+ qcom,mode-thresh = "Fixed";
+ qcom,thresh = <2000000>;
+ qcom,dual-conf;
+ qcom,bimc,bw = <300000>;
+ qcom,bimc,gp = <5>;
+ qcom,bimc,thmp = <50>;
+ };
+ };
+
+
+
+
+The bus scaling driver also provides the ability to configure
+bus performance parameters across the entire chip-set.
+Various clients use MSM scaling APIs to request bandwidth
+between multiple master-slave pairs. The bus driver then finds
+the optimal path between the master and the slave, and aggregates
+the bandwidth and clock requests for all master-slave pairs on
+that path, and programs hardware accordingly.
+
+The device-tree data required for bus-scaling can be embedded within
+the clients' device nodes. The clients can register with the bus driver
+using the following properties:
+
+- qcom,msm-bus,name: String representing the client-name
+- qcom,msm-bus,num-cases: Total number of usecases
+- qcom,msm-bus,active-only: Boolean context flag for requests in active or
+			dual (active & sleep) context
+- qcom,msm-bus,num-paths: Total number of master-slave pairs
+- qcom,msm-bus,vectors-KBps: Arrays of unsigned integers representing:
+ master-id, slave-id, arbitrated bandwidth
+ in KBps, instantaneous bandwidth in KBps
+
+Example:
+
+ qcom,msm-bus,name = "client-name";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-paths = <2>;
+	qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>, <26 512 0 0>,
+ <22 512 320000 3200000>, <26 512 3200000 3200000>,
+ <22 512 160000 1600000>, <26 512 1600000 1600000>;
+
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt
new file mode 100644
index 0000000..a4778ef7
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt
@@ -0,0 +1,270 @@
+MSM Bus Devices for adhoc bus topologies
+
+Buses are the interconnects between various devices. The devices are
+connected in different topologies. The bus scaling driver accepts
+bandwidth requests from clients and ensures that the bandwidth requests
+can be met between the source and destination for that client.
+In order to accept and honor bandwidth requests the bus scaling driver
+needs to know about the bus topology.
+This device tree binding represents the bus devices in the SOC, their
+connections to other bus devices and the resources associated with each
+node. The bus scaling driver uses this device tree to setup the bus
+topology in order to apply client bandwidth requests.
+
+The mandatory properties for bus driver are:
+
+compatible: "qcom,msm-bus-device"
+compatible: "qcom,msm-bus-rsc"
+
+The register space associated with the bus devices are represented with
+the following optional properties:
+reg: Register space for a bus device.
+reg-name: Name of the register space for the bus device.
+
+The child nodes represent the devices on the bus.
+
+The following properties are mandatory for a child node
+
+cell-id: The unique device id of the child node.
+ For a master the ID is between 0 - 512
+ For a slave the ID is between 512 - 1024
+ For internal nodes the range is > 10000
+ The range of ids for the different types of child
+ devices are chosen for convenience, the only
+ requirement is that the id's be unique among the
+ child devices.
+label: Unique name of the device.
+
+The following are optional properties for child nodes:
+
+
+qcom,fab-dev: Optional boolean parameter that states if the device
+ is a fabric device or not.
+ Typically these optional properties are used for
+ devices that represent fabric devices.
+qcom,bypass-qos-prg: Optional debug parameter to avoid programming the QoS
+ HW registers for a given fabric device.
+ Typically these optional properties are used for
+ devices that represent fabric devices.
+qcom,base-name: Parameter that specifies the physical base address for
+ accessing registers associated with the child device.
+ Typically these optional properties are used for
+ devices that represent fabric devices.
+qcom,base-offset: Parameter that gives the offset from the base address to access
+ the QoS registers.
+ Typically these optional properties are used for
+ devices that represent fabric devices.
+qcom,qos-off: Parameter that represents the delta between QoS register address
+ space for different devices.
+ Typically these optional properties are used for
+ devices that represent fabric devices.
+qcom,agg-scheme: Parameter that represents the aggregation scheme to be used for the
+ node. This parameter defaults to LEGACY scheme. The valid options
+ are LEGACY/SCHEME_1.
+qcom,util-fact: Parameter that represents the DDR utilization factor to be used in
+ LEGACY scheme. It is represented as actual util-factor * 100.
+qcom,vrail-comp: Parameter that represents the voltage rail compensation to push
+ the bus to the next level if needed in LEGACY and SCHEME 1 aggregation
+ schemes. It is represented as actual vrail-comp * 100.
+qcom,util-levels: Array of tuples that represent a bandwidth threshold and util factor
+			to be used up to the given threshold.
+qcom,bus-type: Parameter that represents the bus type such as BIMC or NOC.
+ Typically these optional properties are used for
+ devices that represent fabric devices.
+bus-gdsc-supply: Optional fabric device parameter that is a reference to the dual
+ context GDSC supply that is needed before clock operations.
+bus-a-gdsc-supply: Optional fabric device parameter that is a reference to an active
+ only context GDSC supply that is needed before clock operations.
+bus-qos-gdsc-supply: Optional node or fabric device parameter that is a reference to a GDSC
+ supply that is needed before use of the clock needed to program
+ QoS registers.
+node-gdsc-supply: Optional node device parameter that is a reference to a GDSC supply
+ that is needed before node-clock operations.
+qcom,enable-only-clk:	Optional property that represents if the clock doesn't support
+			the clk_set_rate API and should only be enabled/disabled.
+qcom,setrate-only-clk:	Optional property that indicates that bus driver should only
+			set a rate on a clock handle and not call the enable/disable
+			clock API.
+clock-names: Optional property that represents the clock name associated
+ with the device "bus_clk", "bus_a_clk";
+clocks: Property pair that represents the clock controller and the clock
+			id. This in combination with the clock-name is used to obtain
+ the handle for the clock associated with this device.
+qcom,virt-dev: Parameter used for devices that represent virtual devices. Virtual
+ devices aren't real devices on the SOC but are used to aggregate
+ resources in some special cases.
+qcom,qport: The offset index into the masters QoS register space.
+qcom,num-ports: The number of ports that the device has.
+qcom,ap-owned: Property that states if the device is "owned" by the Apps processor.
+ If true then the AP will program the QoS registers for the device
+ else it is done by RPM.
+qcom,connections: An array of phandles that represent the devices this device is connected to.;
+qcom,bus-dev: Phandle that represents the fabric device that this child node belongs to.
+qcom,qos-mode: QoS mode to be programmed for this device, only applicable for AP owned resource.
+qcom,prio-rd: Read priority for a BIMC bus master (Can be 0/1/2)
+qcom,prio-wr: Write priority for a BIMC bus master (Can be 0/1/2)
+qcom,prio0: Priority low signal for a NoC bus master
+ (Can be 0/1/2).
+qcom,reg-prio1: Regulator mode Priority high signal for a NoC bus master if the master port is in
+ regulator QoS mode
+qcom,reg-prio0: Regulator Priority low signal for a NoC bus master if the master port is in
+ regulator Qos mode.
+ (Can be 0/1/2).
+qcom,prio1: Priority high signal for a NoC bus master
+qcom,bw_buffer: Optional parameter in KBytes used to specify a buffer value that should be added to
+ the voted bandwidth value to figure out the limiting bandwidth for a master port.
+qcom,buswidth: The buswidth at the device, default is 8 bytes.
+qcom,mas-rpm-id: For non-AP owned device this is the RPM id for devices that are bus masters.
+ This is the id that is used when sending a message to RPM for this device.
+qcom,slv-rpm-id: For non-AP owned device this is the RPM id for devices that are bus slaves.
+ This is the id that is used when sending a message to RPM for this device.
+qcom,blacklist: An array of phandles that represent devices that this device
+ cannot connect to either directly or via any number of
+ intermediate nodes.
+qcom,agg-ports: The number of aggregation ports on the bus.
+qcom,node-qos-bcms: Optional property to target specific BCMs to toggle during QoS configuration,
+ this is to ensure QoS register space is clocked and accessible. Array is
+ defined as follows: BCM node ID, VoteX, VoteY. The vectors must be defined in
+ sets of the three values aforementioned.
+qcom,prio: Default fixed priority for bus master.
+qcom,qos-lim-params: Array containing QoS limiter configurations defined as: Bandwidth, Saturation.
+ Must define "qcom,qos-lim-en" for these settings to take effect.
+qcom,qos-lim-en: Boolean to enable limiter settings, default is disabled.
+qcom,qos-reg-params: Array containing QoS regulator configurations defined as: Low Priority, High
+			Priority, Bandwidth, Saturation. Must define "qcom,qos-reg-mode" for these
+ settings to take effect.
+qcom,qos-reg-mode: Array containing QoS regulator mode enablement: Read Enable, Write Enable,
+ default is disabled.
+qcom,forwarding:	Boolean indicating Urgent Forwarding enablement.
+
+The following properties are optional, as collecting data via coresight might
+not be supported, and are present on child nodes that represent NOC devices.
+The documentation for coresight properties can be found in:
+Documentation/devicetree/bindings/coresight/coresight.txt
+
+coresight-id		Unique integer identifier for the bus.
+coresight-name Unique descriptive name of the bus.
+coresight-nr-inports Number of input ports on the bus.
+coresight-outports List of output port numbers on the bus.
+coresight-child-list List of phandles pointing to the children of this
+ component.
+coresight-child-ports List of input port numbers of the children.
+
+The following sub-nodes are optional parameters:
+
+qcom,node-qos-clks: Optional node listing all the clocks and regulators required for programming of
+ QoS registers. Usually these are associated with fabric nodes.
+ clock-names: An array of clock names for QoS programming,
+ clocks: An array of clock phandles corresponding to the clock names listed above.
+ clock-name-gdsc:
+ An optional property listing the regulator associated with a given clock name.
+
+Example:
+
+&ad_hoc_bus {
+ compatible = "msm-bus-device";
+ reg = <0x580000 0x62000>;
+ reg-names = "snoc-base";
+
+ fab_snoc: fab-snoc {
+ cell-id = <1024>;
+ label = "fab-snoc";
+ qcom,fab-dev;
+ qcom,bypass-qos-prg;
+ qcom,agg-scheme = <SCHEME_1>;
+ qcom,util-levels = <450000 133>,
+ <750000 154>;
+ qcom,base-name = "snoc-base";
+ qcom,base-offset = <0x7000>;
+ qcom,qos-off = <0x1000>;
+ qcom,bus-type = <1>;
+ clock-names = "bus_clk", "bus_a_clk";
+ clocks = <&clock_rpm clk_snoc_msmbus_clk>,
+ <&clock_rpm clk_snoc_msmbus_a_clk>;
+ qcom,node-qos-clks {
+ clock-names = "q0-clk", "q1-clk";
+ clocks = <&clock_gcc clk_q0_clk>,
+ <&clock_gcc clk_q1_clk>;
+ q0-clk-supply = <&gdsc_q0_clk>;
+ };
+ qcom,node-qos-bcms = <0x7011 0 1>;
+		qcom,prio = <1>;
+ qcom,qos-lim-params = <1000 1000>;
+		qcom,qos-lim-en;
+ qcom,qos-reg-params = <1 2 1000 1000>;
+ qcom,qos-reg-mode = <1 1>;
+ };
+
+ mm_int_bimc: mm-int-bimc {
+ cell-id = <10003>;
+ label = "mm-int-bimc";
+ qcom,util-fact = <154>;
+ qcom,vrail-comp = <100>;
+ qcom,ap-owned;
+ qcom,connections = <&snoc_bimc_1_mas>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <16>;
+ };
+
+ snoc_int_0: snoc-int-0 {
+ cell-id = <10004>;
+ label = "snoc-int-0";
+ qcom,connections = <&slv_qdss_stm &slv_imem &snoc_pnoc_mas>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,mas-rpm-id = <99>;
+ qcom,slv-rpm-id = <130>;
+ qcom,buswidth = <8>;
+ };
+};
+
+
+The bus scaling driver also provides the ability to configure
+bus performance parameters across the entire chip-set.
+Various clients use MSM scaling APIs to request bandwidth
+between multiple master-slave pairs. The bus driver then finds
+the optimal path between the master and the slave, and aggregates
+the bandwidth and clock requests for all master-slave pairs on
+that path, and programs hardware accordingly.
+
+The device-tree data required for bus-scaling can be embedded within
+the clients' device nodes. The clients can register with the bus driver
+using the following properties:
+
+- qcom,msm-bus,name: String representing the client-name
+- qcom,msm-bus,num-cases: Total number of usecases
+- qcom,msm-bus,active-only: Boolean context flag for requests in active or
+			dual (active & sleep) context
+- qcom,msm-bus,num-paths: Total number of master-slave pairs
+- qcom,msm-bus,vectors-KBps: Arrays of unsigned integers representing:
+ master-id, slave-id, arbitrated bandwidth
+ in KBps, instantaneous bandwidth in KBps
+
+The following are optional properties for client's device nodes:
+
+- qcom,msm-bus,alc-voter: Boolean alc_voter flag to indicate that client
+ will vote as an Active Latency Client.
+- qcom,msm-bus,vectors-alc: Arrays of unsigned integers representing:
+ first access latency, idle time in ns, this
+ property is required if qcom,msm-bus,alc-voter
+ is present.
+
+Example for default client:
+
+ qcom,msm-bus,name = "client-name";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,num-paths = <2>;
+	qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>, <26 512 0 0>,
+ <22 512 320000 3200000>, <26 512 3200000 3200000>,
+ <22 512 160000 1600000>, <26 512 1600000 1600000>;
+
+Example for ALC client:
+
+ qcom,msm-bus,name = "client-name";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,alc-voter;
+ qcom,msm-bus,vectors-alc =
+ <0 0>,
+ <500 1600>;
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus_rules.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus_rules.txt
new file mode 100644
index 0000000..b68284c
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_bus_rules.txt
@@ -0,0 +1,62 @@
+MSM Bus static bandwidth rules for adhoc bus topologies
+
+Buses are the interconnects between various devices. The devices are
+connected in different topologies. The static bandwidth rules allow
+setting up SOC specific rules to monitor certain bandwidth requests
+at different bus nodes. When the conditions of the rule are met,
+the bus driver will be given a list of actions to be taken on specific
+bus master ports (throttle on/off, what bandwidth to throttle to, etc.).
+
+The mandatory properties for bus driver are:
+
+compatible: "qcom,msm-bus-static-bw-rules"
+
+The static_rules node can have numerous rules for the different bandwidth voting
+conditions to be monitored. The mandatory properties for the rules are
+
+- qcom,src-nodes: An array of phandles denoting the source nodes
+ whose bandwidth votes need to be monitored.
+- qcom,src-field: This field represents the voted field of the
+ source node to be monitored. Possible values
+ are FLD_IB/FLD_AB/FLD_CLK
+- qcom,src-op: The operand to be used when evaluating a node's
+ bandwidth vote with a threshold. Possible values
+ are OP_LE/OP_LT/OP_GT/OP_GE.
+- qcom,thresh: The threshold in Kbytes/s to be used in vote
+ evaluation.
+- qcom,mode: The QoS mode to be applied when this rule's
+ criteria are satisfied. Possible values are
+ THROTTLE_ON/THROTTLE_OFF
+- qcom,dest-node: An array of phandles representing the nodes to
+ which the QoS mode is to be applied.
+
+The optional properties for the rule node are:
+- qcom,dest-bw: The destination bandwidth value in Kbytes/s to
+ be used toward the QoS mode for the destination
+ node.
+
+Example:
+ static-rules {
+ compatible = "qcom,msm-bus-static-bw-rules";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rule@0 {
+ qcom,src-nodes = <&mas_apss>;
+ qcom,src-field = <FLD_IB>;
+ qcom,src-op = <OP_LE>;
+ qcom,thresh = <1599078>;
+ qcom,mode = <THROTTLE_ON>;
+ qcom,dest-node = <&mas_apss>;
+ qcom,dest-bw = <1599078>;
+ };
+
+ rule@1 {
+ qcom,src-nodes = <&mas_apss>;
+ qcom,src-field = <FLD_IB>;
+ qcom,src-op = <OP_GT>;
+ qcom,thresh = <1599078>;
+ qcom,mode = <THROTTLE_OFF>;
+ qcom,dest-node = <&mas_apss>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ion.txt b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
index 3b6cf9c..cc7d2ba 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
@@ -19,6 +19,7 @@
the following:
- "SYSTEM"
- "CARVEOUT"
+ - "SECURE_CARVEOUT"
- "DMA"
- "HYP_CMA"
- "SYSTEM_SECURE"
@@ -57,3 +58,32 @@
};
};
+
+"SECURE_CARVEOUT"
+
+This heap type is expected to contain multiple child nodes. Each child node
+shall contain the following required properties:
+
+- memory-region:
+Refer to Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+
+- token:
+A u32 containing the set of secure domains which will be able to access the
+memory-region.
+
+Example:
+qcom,ion {
+ compatible = "qcom,msm-ion";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ion-heap@14 {
+ reg = <14>;
+ qcom,ion-heap-type = "SECURE_CARVEOUT";
+
+ node1 {
+ memory-region = <&cp_region>;
+ token = <ION_FLAG_CP_TOUCH>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
index 74991a0..19a9d359 100644
--- a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
@@ -50,6 +50,11 @@
to enable.
- qcom,reset-aon-logic: If present, the GPU DEMET cells need to be reset while
enabling the GX GDSC.
+ - vdd_parent-supply: phandle to the regulator that this GDSC gates. If
+ present, need to vote for a minimum operational voltage
+ (LOW_SVS) on the GDSC parent regulator prior to
+ configuring it. The vote is removed once the GDSC FSM
+ has latched on to the new state.
- resets: reset specifier pair consisting of phandle for the reset controller
and reset lines used by this controller. These can be
supplied only if we support qcom,skip-logic-collapse.
diff --git a/Documentation/devicetree/bindings/soc/qcom/fsa4480-i2c.txt b/Documentation/devicetree/bindings/soc/qcom/fsa4480-i2c.txt
new file mode 100644
index 0000000..ae128eb
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/fsa4480-i2c.txt
@@ -0,0 +1,18 @@
+Qualcomm Technologies, Inc.
+
+Fairchild FSA4480 Device
+
+This device is used for switching the orientation of USB-C analog
+signals and for display. It uses I2C communication to set the registers
+to configure the switches inside the FSA4480 chip to change
+orientation and also to set SBU1/SBU2 connections of USB-C.
+
+Required properties:
+ - compatible: Should be "qcom,fsa4480-i2c".
+ - reg: I2C device address of the device
+
+Example:
+ fsa4480: fsa4480@43 {
+ compatible = "qcom,fsa4480-i2c";
+ reg = <0x43>;
+ };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,glink-pkt.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,glink-pkt.txt
new file mode 100644
index 0000000..b5c660c8
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,glink-pkt.txt
@@ -0,0 +1,40 @@
+Qualcomm Technologies, Inc. G-Link Packet Driver (glinkpkt)
+
+[Root level node]
+Required properties:
+-compatible : should be "qcom,glinkpkt"
+
+[Second level nodes]
+qcom,glinkpkt-channel-names
+Required properties:
+-qcom,glinkpkt-transport : the glinkpkt transport layer
+-qcom,glinkpkt-edge : the remote subsystem name
+-qcom,glinkpkt-ch-name : the glink channel name
+-qcom,glinkpkt-dev-name : the glinkpkt device name
+
+Example:
+
+ qcom,glink_pkt {
+ compatible = "qcom,glinkpkt";
+
+ qcom,glinkpkt-at-mdm0 {
+ qcom,glinkpkt-transport = "smd_trans";
+ qcom,glinkpkt-edge = "mpss";
+ qcom,glinkpkt-ch-name = "DS";
+ qcom,glinkpkt-dev-name = "at_mdm0";
+ };
+
+ qcom,glinkpkt-loopback-cntl {
+ qcom,glinkpkt-transport = "lloop";
+ qcom,glinkpkt-edge = "local";
+ qcom,glinkpkt-ch-name = "LOCAL_LOOPBACK_CLNT";
+ qcom,glinkpkt-dev-name = "glink_pkt_loopback_ctrl";
+ };
+
+ qcom,glinkpkt-loopback-data {
+ qcom,glinkpkt-transport = "lloop";
+ qcom,glinkpkt-edge = "local";
+ qcom,glinkpkt-ch-name = "glink_pkt_lloop_CLNT";
+ qcom,glinkpkt-dev-name = "glink_pkt_loopback";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,glink-probe.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,glink-probe.txt
new file mode 100644
index 0000000..badb9f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,glink-probe.txt
@@ -0,0 +1,69 @@
+Qualcomm Technologies, Inc. GLINK Probe
+
+This binding describes the GLINK Probe driver, a device
+that initializes the GLINK edge pairs within the system.
+
+- compatible :
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,glink"
+
+= SUBNODES
+The GLINK probe node must contain subnodes that describe the
+edge-pairs. See qcom,glink.txt for details on how to describe them.
+
+In addition to the properties in qcom,glink.txt, the GLINK Probe driver
+requires the qcom,glink-label and transport type to be specified in the
+subnodes.
+
+- transport :
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "smem", "spss", or "spi"
+
+- qcom,glink-label :
+ Usage: required
+ Value type: <stringlist>
+ Definition: specifies the identifier of the remote proc of this edge.
+
+= GLINK_SSR
+The GLINK probe driver also initializes the GLINK_SSR channel for the edges
+that it brings up. The channel should be specified as a subnode to each edge. In
+addition to the properties in qcom,glink.txt to specify a channel device node,
+the qcom,notify-edges property must be defined.
+
+- qcom,notify-edges :
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: list of phandles that specify the subsystems this glink edge
+ needs to receive ssr notifications about.
+
+= EXAMPLE
+qcom,glink {
+ compatible = "qcom,glink";
+ glink_modem: modem {
+ transport = "smem";
+ qcom,remote-pid = <0>;
+ mboxes = <&apcs_glb 8>;
+ mbox-names = "mpss_smem";
+ interrupts = <GIC_SPI 449 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,modem_glink_ssr {
+ qcom,glink-channels = "glink_ssr";
+ qcom,notify-edges = <&glink_adsp>;
+ };
+ };
+
+ glink_adsp: adsp {
+ transport = "smem";
+ qcom,remote-pid = <2>;
+ mboxes = <&apcs_glb 4>;
+ mbox-names = "adsp_smem";
+ interrupts = <GIC_SPI 348 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,modem_glink_ssr {
+ qcom,glink-channels = "glink_ssr";
+ qcom,notify-edges = <&glink_modem>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
index 880dc1e..33beda5 100644
--- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -69,6 +69,9 @@
events.
- qcom,num-gsi-evt-buffs: If present, specifies number of GSI based hardware accelerated
event buffers. 1 event buffer is needed per h/w accelerated endpoint.
+- qcom,gsi-reg-offset: USB GSI wrapper registers offset. It is mandatory to
+ provide this if the qcom,num-gsi-evt-buffs property is specified. Check the
+ dwc3-msm driver for the order and names of the register offsets that need to be provided.
- qcom,pm-qos-latency: This represents max tolerable CPU latency in microsecs,
which is used as a vote by driver to get max performance in perf mode.
- qcom,smmu-s1-bypass: If present, configure SMMU to bypass stage 1 translation.
diff --git a/arch/arm64/boot/dts/qcom/kona-bus.dtsi b/arch/arm64/boot/dts/qcom/kona-bus.dtsi
new file mode 100644
index 0000000..d5408f8
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-bus.dtsi
@@ -0,0 +1,2245 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/msm/msm-bus-ids.h>
+
+&soc {
+ ad_hoc_bus: ad-hoc-bus {
+ compatible = "qcom,msm-bus-device";
+ reg = <0x016E0000 0x1f180>,
+ <0x1700000 0x3d180>,
+ <0x1500000 0x28000>,
+ <0x90C0000 0x4200>,
+ <0x9100000 0xae200>,
+ <0x9100000 0xae200>,
+ <0x1740000 0x1f080>,
+ <0x1620000 0x1c200>,
+ <0x1620000 0x40000>,
+ <0x1620000 0x40000>,
+ <0x1700000 0x3d180>,
+ <0x9990000 0x1600>;
+
+ reg-names = "aggre1_noc-base", "aggre2_noc-base",
+ "config_noc-base", "dc_noc-base",
+ "mc_virt-base", "gem_noc-base",
+ "mmss_noc-base", "system_noc-base", "ipa_virt-base",
+ "camnoc_virt-base", "compute_noc-base", "npu_noc-base";
+
+ /*RSCs*/
+ rsc_apps: rsc-apps {
+ cell-id = <MSM_BUS_RSC_APPS>;
+ label = "apps_rsc";
+ qcom,rsc-dev;
+ qcom,req_state = <2>;
+ };
+
+ rsc_disp: rsc-disp {
+ cell-id = <MSM_BUS_RSC_DISP>;
+ label = "disp_rsc";
+ qcom,rsc-dev;
+ qcom,req_state = <2>;
+ };
+
+ /*BCMs*/
+ bcm_acv: bcm-acv {
+ cell-id = <MSM_BUS_BCM_ACV>;
+ label = "ACV";
+ qcom,bcm-name = "ACV";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_alc: bcm-alc {
+ cell-id = <MSM_BUS_BCM_ALC>;
+ label = "ALC";
+ qcom,bcm-name = "ALC";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_mc0: bcm-mc0 {
+ cell-id = <MSM_BUS_BCM_MC0>;
+ label = "MC0";
+ qcom,bcm-name = "MC0";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sh0: bcm-sh0 {
+ cell-id = <MSM_BUS_BCM_SH0>;
+ label = "SH0";
+ qcom,bcm-name = "SH0";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_mm0: bcm-mm0 {
+ cell-id = <MSM_BUS_BCM_MM0>;
+ label = "MM0";
+ qcom,bcm-name = "MM0";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_ce0: bcm-ce0 {
+ cell-id = <MSM_BUS_BCM_CE0>;
+ label = "CE0";
+ qcom,bcm-name = "CE0";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_ip0: bcm-ip0 {
+ cell-id = <MSM_BUS_BCM_IP0>;
+ label = "IP0";
+ qcom,bcm-name = "IP0";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_mm1: bcm-mm1 {
+ cell-id = <MSM_BUS_BCM_MM1>;
+ label = "MM1";
+ qcom,bcm-name = "MM1";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sh2: bcm-sh2 {
+ cell-id = <MSM_BUS_BCM_SH2>;
+ label = "SH2";
+ qcom,bcm-name = "SH2";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_mm2: bcm-mm2 {
+ cell-id = <MSM_BUS_BCM_MM2>;
+ label = "MM2";
+ qcom,bcm-name = "MM2";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_qup0: bcm-qup0 {
+ cell-id = <MSM_BUS_BCM_QUP0>;
+ label = "QUP0";
+ qcom,bcm-name = "QUP0";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sh3: bcm-sh3 {
+ cell-id = <MSM_BUS_BCM_SH3>;
+ label = "SH3";
+ qcom,bcm-name = "SH3";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_mm3: bcm-mm3 {
+ cell-id = <MSM_BUS_BCM_MM3>;
+ label = "MM3";
+ qcom,bcm-name = "MM3";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sh4: bcm-sh4 {
+ cell-id = <MSM_BUS_BCM_SH4>;
+ label = "SH4";
+ qcom,bcm-name = "SH4";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sn0: bcm-sn0 {
+ cell-id = <MSM_BUS_BCM_SN0>;
+ label = "SN0";
+ qcom,bcm-name = "SN0";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_co0: bcm-co0 {
+ cell-id = <MSM_BUS_BCM_CO0>;
+ label = "CO0";
+ qcom,bcm-name = "CO0";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_cn0: bcm-cn0 {
+ cell-id = <MSM_BUS_BCM_CN0>;
+ label = "CN0";
+ qcom,bcm-name = "CN0";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sn1: bcm-sn1 {
+ cell-id = <MSM_BUS_BCM_SN1>;
+ label = "SN1";
+ qcom,bcm-name = "SN1";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sn2: bcm-sn2 {
+ cell-id = <MSM_BUS_BCM_SN2>;
+ label = "SN2";
+ qcom,bcm-name = "SN2";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_co2: bcm-co2 {
+ cell-id = <MSM_BUS_BCM_CO2>;
+ label = "CO2";
+ qcom,bcm-name = "CO2";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sn3: bcm-sn3 {
+ cell-id = <MSM_BUS_BCM_SN3>;
+ label = "SN3";
+ qcom,bcm-name = "SN3";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sn4: bcm-sn4 {
+ cell-id = <MSM_BUS_BCM_SN4>;
+ label = "SN4";
+ qcom,bcm-name = "SN4";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sn5: bcm-sn5 {
+ cell-id = <MSM_BUS_BCM_SN5>;
+ label = "SN5";
+ qcom,bcm-name = "SN5";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sn6: bcm-sn6 {
+ cell-id = <MSM_BUS_BCM_SN6>;
+ label = "SN6";
+ qcom,bcm-name = "SN6";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sn7: bcm-sn7 {
+ cell-id = <MSM_BUS_BCM_SN7>;
+ label = "SN7";
+ qcom,bcm-name = "SN7";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sn8: bcm-sn8 {
+ cell-id = <MSM_BUS_BCM_SN8>;
+ label = "SN8";
+ qcom,bcm-name = "SN8";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sn9: bcm-sn9 {
+ cell-id = <MSM_BUS_BCM_SN9>;
+ label = "SN9";
+ qcom,bcm-name = "SN9";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sn11: bcm-sn11 {
+ cell-id = <MSM_BUS_BCM_SN11>;
+ label = "SN11";
+ qcom,bcm-name = "SN11";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sn12: bcm-sn12 {
+ cell-id = <MSM_BUS_BCM_SN12>;
+ label = "SN12";
+ qcom,bcm-name = "SN12";
+ qcom,rscs = <&rsc_apps>;
+ qcom,bcm-dev;
+ };
+
+ bcm_acv_display: bcm-acv_display {
+ cell-id = <MSM_BUS_BCM_ACV_DISPLAY>;
+ label = "ACV_DISPLAY";
+ qcom,bcm-name = "ACV";
+ qcom,rscs = <&rsc_disp>;
+ qcom,bcm-dev;
+ };
+
+ bcm_alc_display: bcm-alc_display {
+ cell-id = <MSM_BUS_BCM_ALC_DISPLAY>;
+ label = "ALC_DISPLAY";
+ qcom,bcm-name = "ALC";
+ qcom,rscs = <&rsc_disp>;
+ qcom,bcm-dev;
+ };
+
+ bcm_mc0_display: bcm-mc0_display {
+ cell-id = <MSM_BUS_BCM_MC0_DISPLAY>;
+ label = "MC0_DISPLAY";
+ qcom,bcm-name = "MC0";
+ qcom,rscs = <&rsc_disp>;
+ qcom,bcm-dev;
+ };
+
+ bcm_sh0_display: bcm-sh0_display {
+ cell-id = <MSM_BUS_BCM_SH0_DISPLAY>;
+ label = "SH0_DISPLAY";
+ qcom,bcm-name = "SH0";
+ qcom,rscs = <&rsc_disp>;
+ qcom,bcm-dev;
+ };
+
+ bcm_mm0_display: bcm-mm0_display {
+ cell-id = <MSM_BUS_BCM_MM0_DISPLAY>;
+ label = "MM0_DISPLAY";
+ qcom,bcm-name = "MM0";
+ qcom,rscs = <&rsc_disp>;
+ qcom,bcm-dev;
+ };
+
+ bcm_mm1_display: bcm-mm1_display {
+ cell-id = <MSM_BUS_BCM_MM1_DISPLAY>;
+ label = "MM1_DISPLAY";
+ qcom,bcm-name = "MM1";
+ qcom,rscs = <&rsc_disp>;
+ qcom,bcm-dev;
+ };
+
+ bcm_mm2_display: bcm-mm2_display {
+ cell-id = <MSM_BUS_BCM_MM2_DISPLAY>;
+ label = "MM2_DISPLAY";
+ qcom,bcm-name = "MM2";
+ qcom,rscs = <&rsc_disp>;
+ qcom,bcm-dev;
+ };
+
+
+ /*Buses*/
+ fab_aggre1_noc: fab-aggre1_noc {
+ cell-id = <MSM_BUS_FAB_A1_NOC>;
+ label = "fab-aggre1_noc";
+ qcom,fab-dev;
+ qcom,base-name = "aggre1_noc-base";
+ qcom,qos-off = <4096>;
+ qcom,base-offset = <8192>;
+ qcom,sbm-offset = <0>;
+ qcom,bypass-qos-prg;
+ qcom,bus-type = <1>;
+ clocks = <>;
+ };
+
+ fab_aggre2_noc: fab-aggre2_noc {
+ cell-id = <MSM_BUS_FAB_A2_NOC>;
+ label = "fab-aggre2_noc";
+ qcom,fab-dev;
+ qcom,base-name = "aggre2_noc-base";
+ qcom,qos-off = <4096>;
+ qcom,base-offset = <12288>;
+ qcom,sbm-offset = <0>;
+ qcom,bypass-qos-prg;
+ qcom,bus-type = <1>;
+ clocks = <>;
+ };
+
+ fab_camnoc_virt: fab-camnoc_virt {
+ cell-id = <MSM_BUS_FAB_CAMNOC_VIRT>;
+ label = "fab-camnoc_virt";
+ qcom,fab-dev;
+ qcom,base-name = "camnoc_virt-base";
+ qcom,qos-off = <0>;
+ qcom,base-offset = <0>;
+ qcom,sbm-offset = <0>;
+ qcom,bypass-qos-prg;
+ clocks = <>;
+ };
+
+ fab_compute_noc: fab-compute_noc {
+ cell-id = <MSM_BUS_FAB_COMP_NOC>;
+ label = "fab-compute_noc";
+ qcom,fab-dev;
+ qcom,base-name = "compute_noc-base";
+ qcom,qos-off = <2048>;
+ qcom,base-offset = <208896>;
+ qcom,sbm-offset = <0>;
+ qcom,bypass-qos-prg;
+ qcom,bus-type = <1>;
+ clocks = <>;
+ };
+
+ fab_config_noc: fab-config_noc {
+ cell-id = <MSM_BUS_FAB_CONFIG_NOC>;
+ label = "fab-config_noc";
+ qcom,fab-dev;
+ qcom,base-name = "config_noc-base";
+ qcom,qos-off = <0>;
+ qcom,base-offset = <0>;
+ qcom,sbm-offset = <0>;
+ qcom,bypass-qos-prg;
+ qcom,bus-type = <1>;
+ clocks = <>;
+ };
+
+ fab_dc_noc: fab-dc_noc {
+ cell-id = <MSM_BUS_FAB_DC_NOC>;
+ label = "fab-dc_noc";
+ qcom,fab-dev;
+ qcom,base-name = "dc_noc-base";
+ qcom,qos-off = <0>;
+ qcom,base-offset = <0>;
+ qcom,sbm-offset = <0>;
+ qcom,bypass-qos-prg;
+ qcom,bus-type = <1>;
+ clocks = <>;
+ };
+
+ fab_gem_noc: fab-gem_noc {
+ cell-id = <MSM_BUS_FAB_GEM_NOC>;
+ label = "fab-gem_noc";
+ qcom,fab-dev;
+ qcom,base-name = "gem_noc-base";
+ qcom,qos-off = <4096>;
+ qcom,base-offset = <135168>;
+ qcom,sbm-offset = <0>;
+ qcom,bypass-qos-prg;
+ qcom,bus-type = <1>;
+ clocks = <>;
+ };
+
+ fab_ipa_virt: fab-ipa_virt {
+ cell-id = <MSM_BUS_FAB_IPA_VIRT>;
+ label = "fab-ipa_virt";
+ qcom,fab-dev;
+ qcom,base-name = "ipa_virt-base";
+ qcom,qos-off = <0>;
+ qcom,base-offset = <0>;
+ qcom,sbm-offset = <0>;
+ qcom,bypass-qos-prg;
+ clocks = <>;
+ };
+
+ fab_mc_virt: fab-mc_virt {
+ cell-id = <MSM_BUS_FAB_MC_VIRT>;
+ label = "fab-mc_virt";
+ qcom,fab-dev;
+ qcom,base-name = "mc_virt-base";
+ qcom,qos-off = <0>;
+ qcom,base-offset = <0>;
+ qcom,sbm-offset = <0>;
+ qcom,bypass-qos-prg;
+ clocks = <>;
+ };
+
+ fab_mmss_noc: fab-mmss_noc {
+ cell-id = <MSM_BUS_FAB_MMSS_NOC>;
+ label = "fab-mmss_noc";
+ qcom,fab-dev;
+ qcom,base-name = "mmss_noc-base";
+ qcom,qos-off = <2048>;
+ qcom,base-offset = <40960>;
+ qcom,sbm-offset = <0>;
+ qcom,bypass-qos-prg;
+ qcom,bus-type = <1>;
+ clocks = <>;
+ };
+
+ fab_npu_noc: fab-npu_noc {
+ cell-id = <MSM_BUS_FAB_NPU_NOC>;
+ label = "fab-npu_noc";
+ qcom,fab-dev;
+ qcom,base-name = "npu_noc-base";
+ qcom,qos-off = <0>;
+ qcom,base-offset = <0>;
+ qcom,sbm-offset = <0>;
+ qcom,bypass-qos-prg;
+ qcom,bus-type = <1>;
+ clocks = <>;
+ };
+
+ fab_system_noc: fab-system_noc {
+ cell-id = <MSM_BUS_FAB_SYS_NOC>;
+ label = "fab-system_noc";
+ qcom,fab-dev;
+ qcom,base-name = "system_noc-base";
+ qcom,qos-off = <4096>;
+ qcom,base-offset = <73728>;
+ qcom,sbm-offset = <0>;
+ qcom,bypass-qos-prg;
+ qcom,bus-type = <1>;
+ clocks = <>;
+ };
+
+ fab_gem_noc_display: fab-gem_noc_display {
+ cell-id = <MSM_BUS_FAB_GEM_NOC_DISPLAY>;
+ label = "fab-gem_noc_display";
+ qcom,fab-dev;
+ qcom,base-name = "gem_noc-base";
+ qcom,qos-off = <4096>;
+ qcom,base-offset = <135168>;
+ qcom,sbm-offset = <0>;
+ qcom,bypass-qos-prg;
+ qcom,bus-type = <1>;
+ clocks = <>;
+ };
+
+ fab_mc_virt_display: fab-mc_virt_display {
+ cell-id = <MSM_BUS_FAB_MC_VIRT_DISPLAY>;
+ label = "fab-mc_virt_display";
+ qcom,fab-dev;
+ qcom,base-name = "mc_virt-base";
+ qcom,qos-off = <0>;
+ qcom,base-offset = <0>;
+ qcom,sbm-offset = <0>;
+ qcom,bypass-qos-prg;
+ clocks = <>;
+ };
+
+ fab_mmss_noc_display: fab-mmss_noc_display {
+ cell-id = <MSM_BUS_FAB_MMSS_NOC_DISPLAY>;
+ label = "fab-mmss_noc_display";
+ qcom,fab-dev;
+ qcom,base-name = "mmss_noc-base";
+ qcom,qos-off = <2048>;
+ qcom,base-offset = <40960>;
+ qcom,sbm-offset = <0>;
+ qcom,bypass-qos-prg;
+ qcom,bus-type = <1>;
+ clocks = <>;
+ };
+
+
+ /*Masters*/
+
+ mas_qhm_a1noc_cfg: mas-qhm-a1noc-cfg {
+ cell-id = <MSM_BUS_MASTER_A1NOC_CFG>;
+ label = "mas-qhm-a1noc-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_srvc_aggre1_noc>;
+ qcom,bus-dev = <&fab_aggre1_noc>;
+ };
+
+ mas_qhm_qspi: mas-qhm-qspi {
+ cell-id = <MSM_BUS_MASTER_QSPI_0>;
+ label = "mas-qhm-qspi";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <7>;
+ qcom,connections = <&slv_qns_a1noc_snoc>;
+ qcom,bus-dev = <&fab_aggre1_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
+ mas_qhm_qup1: mas-qhm-qup1 {
+ cell-id = <MSM_BUS_MASTER_QUP_1>;
+ label = "mas-qhm-qup1";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <5>;
+ qcom,connections = <&slv_qns_a1noc_snoc>;
+ qcom,bus-dev = <&fab_aggre1_noc>;
+ qcom,bcms = <&bcm_qup0>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
+ mas_qhm_qup2: mas-qhm-qup2 {
+ cell-id = <MSM_BUS_MASTER_QUP_2>;
+ label = "mas-qhm-qup2";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <6>;
+ qcom,connections = <&slv_qns_a1noc_snoc>;
+ qcom,bus-dev = <&fab_aggre1_noc>;
+ qcom,bcms = <&bcm_qup0>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
+ mas_qhm_tsif: mas-qhm-tsif {
+ cell-id = <MSM_BUS_MASTER_TSIF>;
+ label = "mas-qhm-tsif";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <8>;
+ qcom,connections = <&slv_qns_a1noc_snoc>;
+ qcom,bus-dev = <&fab_aggre1_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
+ mas_xm_pcie3_modem: mas-xm-pcie3-modem {
+ cell-id = <MSM_BUS_MASTER_PCIE_2>;
+ label = "mas-xm-pcie3-modem";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <4>;
+ qcom,connections = <&slv_qns_pcie_modem_mem_noc>;
+ qcom,bus-dev = <&fab_aggre1_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
+ mas_xm_sdc4: mas-xm-sdc4 {
+ cell-id = <MSM_BUS_MASTER_SDCC_4>;
+ label = "mas-xm-sdc4";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <2>;
+ qcom,connections = <&slv_qns_a1noc_snoc>;
+ qcom,bus-dev = <&fab_aggre1_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
+ mas_xm_ufs_mem: mas-xm-ufs-mem {
+ cell-id = <MSM_BUS_MASTER_UFS_MEM>;
+ label = "mas-xm-ufs-mem";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <3>;
+ qcom,connections = <&slv_qns_a1noc_snoc>;
+ qcom,bus-dev = <&fab_aggre1_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
+ mas_xm_usb3_0: mas-xm-usb3-0 {
+ cell-id = <MSM_BUS_MASTER_USB3>;
+ label = "mas-xm-usb3-0";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <0>;
+ qcom,connections = <&slv_qns_a1noc_snoc>;
+ qcom,bus-dev = <&fab_aggre1_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
+ mas_xm_usb3_1: mas-xm-usb3-1 {
+ cell-id = <MSM_BUS_MASTER_USB3_1>;
+ label = "mas-xm-usb3-1";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <1>;
+ qcom,connections = <&slv_qns_a1noc_snoc>;
+ qcom,bus-dev = <&fab_aggre1_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
+ mas_qhm_a2noc_cfg: mas-qhm-a2noc-cfg {
+ cell-id = <MSM_BUS_MASTER_A2NOC_CFG>;
+ label = "mas-qhm-a2noc-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_srvc_aggre2_noc>;
+ qcom,bus-dev = <&fab_aggre2_noc>;
+ };
+
+ mas_qhm_qdss_bam: mas-qhm-qdss-bam {
+ cell-id = <MSM_BUS_MASTER_QDSS_BAM>;
+ label = "mas-qhm-qdss-bam";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <11>;
+ qcom,connections = <&slv_qns_a2noc_snoc>;
+ qcom,bus-dev = <&fab_aggre2_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
+ mas_qhm_qup0: mas-qhm-qup0 {
+ cell-id = <MSM_BUS_MASTER_QUP_0>;
+ label = "mas-qhm-qup0";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <12>;
+ qcom,connections = <&slv_qns_a2noc_snoc>;
+ qcom,bus-dev = <&fab_aggre2_noc>;
+ qcom,bcms = <&bcm_qup0>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
+ mas_qnm_cnoc: mas-qnm-cnoc {
+ cell-id = <MSM_BUS_MASTER_CNOC_A2NOC>;
+ label = "mas-qnm-cnoc";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <0>;
+ qcom,connections = <&slv_qns_a2noc_snoc>;
+ qcom,bus-dev = <&fab_aggre2_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ qcom,forwarding;
+ };
+
+ mas_qxm_crypto: mas-qxm-crypto {
+ cell-id = <MSM_BUS_MASTER_CRYPTO_CORE_0>;
+ label = "mas-qxm-crypto";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <1>;
+ qcom,connections = <&slv_qns_a2noc_snoc>;
+ qcom,bus-dev = <&fab_aggre2_noc>;
+ qcom,bcms = <&bcm_ce0>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ qcom,forwarding;
+ };
+
+ mas_qxm_ipa: mas-qxm-ipa {
+ cell-id = <MSM_BUS_MASTER_IPA>;
+ label = "mas-qxm-ipa";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <2>;
+ qcom,connections = <&slv_qns_a2noc_snoc>;
+ qcom,bus-dev = <&fab_aggre2_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ qcom,forwarding;
+ };
+
+ mas_xm_pcie3_0: mas-xm-pcie3-0 {
+ cell-id = <MSM_BUS_MASTER_PCIE>;
+ label = "mas-xm-pcie3-0";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <8>;
+ qcom,connections = <&slv_qns_pcie_mem_noc>;
+ qcom,bus-dev = <&fab_aggre2_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
+ mas_xm_pcie3_1: mas-xm-pcie3-1 {
+ cell-id = <MSM_BUS_MASTER_PCIE_1>;
+ label = "mas-xm-pcie3-1";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <9>;
+ qcom,connections = <&slv_qns_pcie_mem_noc>;
+ qcom,bus-dev = <&fab_aggre2_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
+ mas_xm_qdss_etr: mas-xm-qdss-etr {
+ cell-id = <MSM_BUS_MASTER_QDSS_ETR>;
+ label = "mas-xm-qdss-etr";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <7>;
+ qcom,connections = <&slv_qns_a2noc_snoc>;
+ qcom,bus-dev = <&fab_aggre2_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
+ mas_xm_sdc2: mas-xm-sdc2 {
+ cell-id = <MSM_BUS_MASTER_SDCC_2>;
+ label = "mas-xm-sdc2";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <3>;
+ qcom,connections = <&slv_qns_a2noc_snoc>;
+ qcom,bus-dev = <&fab_aggre2_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
+ mas_xm_ufs_card: mas-xm-ufs-card {
+ cell-id = <MSM_BUS_MASTER_UFS_CARD>;
+ label = "mas-xm-ufs-card";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <4>;
+ qcom,connections = <&slv_qns_a2noc_snoc>;
+ qcom,bus-dev = <&fab_aggre2_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ };
+
+ mas_qxm_camnoc_hf0_uncomp: mas-qxm-camnoc-hf0-uncomp {
+ cell-id = <MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP>;
+ label = "mas-qxm-camnoc-hf0-uncomp";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_qns_camnoc_uncomp>;
+ qcom,bus-dev = <&fab_camnoc_virt>;
+ };
+
+ mas_qxm_camnoc_hf1_uncomp: mas-qxm-camnoc-hf1-uncomp {
+ cell-id = <MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP>;
+ label = "mas-qxm-camnoc-hf1-uncomp";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_qns_camnoc_uncomp>;
+ qcom,bus-dev = <&fab_camnoc_virt>;
+ };
+
+ mas_qxm_camnoc_sf_uncomp: mas-qxm-camnoc-sf-uncomp {
+ cell-id = <MSM_BUS_MASTER_CAMNOC_SF_UNCOMP>;
+ label = "mas-qxm-camnoc-sf-uncomp";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_qns_camnoc_uncomp>;
+ qcom,bus-dev = <&fab_camnoc_virt>;
+ };
+
+ mas_qnm_npu: mas-qnm-npu {
+ cell-id = <MSM_BUS_MASTER_NPU>;
+ label = "mas-qnm-npu";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <2>;
+ qcom,qport = <6 7>;
+ qcom,connections = <&slv_qns_cdsp_mem_noc>;
+ qcom,bus-dev = <&fab_compute_noc>;
+ qcom,bcms = <&bcm_co2>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ qcom,forwarding;
+ };
+
+ mas_qnm_snoc: mas-qnm-snoc {
+ cell-id = <MSM_BUS_SNOC_CNOC_MAS>;
+ label = "mas-qnm-snoc";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_qhs_compute_dsp
+ &slv_qhs_camera_cfg &slv_qhs_tlmm1
+ &slv_qhs_tlmm0 &slv_qhs_sdc4
+ &slv_qhs_tlmm2 &slv_qhs_sdc2
+ &slv_qhs_mnoc_cfg &slv_qhs_ufs_mem_cfg
+ &slv_qhs_snoc_cfg &slv_qhs_pdm
+ &slv_qhs_cx_rdpm &slv_qhs_pcie1_cfg
+ &slv_qhs_a2_noc_cfg &slv_qhs_qdss_cfg
+ &slv_qhs_display_cfg &slv_qhs_pcie_modem_cfg
+ &slv_qhs_tcsr &slv_qhs_dcc_cfg
+ &slv_qhs_ddrss_cfg &slv_qhs_ipc_router
+ &slv_qhs_pcie0_cfg &slv_qhs_cpr_mmcx
+ &slv_qhs_npu_cfg &slv_qhs_ahb2phy0
+ &slv_qhs_ahb2phy1 &slv_qhs_gpuss_cfg
+ &slv_qhs_venus_cfg &slv_qhs_tsif
+ &slv_qhs_ipa &slv_qhs_imem_cfg
+ &slv_qhs_usb3_0 &slv_srvc_cnoc
+ &slv_qhs_ufs_card_cfg &slv_qhs_usb3_1
+ &slv_qhs_lpass_cfg &slv_qhs_cpr_cx
+ &slv_qhs_a1_noc_cfg &slv_qhs_aoss
+ &slv_qhs_prng &slv_qhs_vsense_ctrl_cfg
+ &slv_qhs_qspi &slv_qhs_crypto0_cfg
+ &slv_qhs_pimem_cfg &slv_qhs_cpr_mx
+ &slv_qhs_qup0 &slv_qhs_qup1
+ &slv_qhs_qup2 &slv_qhs_clk_ctl>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ mas_xm_qdss_dap: mas-xm-qdss-dap {
+ cell-id = <MSM_BUS_MASTER_QDSS_DAP>;
+ label = "mas-xm-qdss-dap";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_qhs_compute_dsp
+ &slv_qhs_camera_cfg &slv_qhs_tlmm1
+ &slv_qhs_tlmm0 &slv_qhs_sdc4
+ &slv_qhs_tlmm2 &slv_qhs_sdc2
+ &slv_qhs_mnoc_cfg &slv_qhs_ufs_mem_cfg
+ &slv_qhs_snoc_cfg &slv_qhs_pdm
+ &slv_qhs_cx_rdpm &slv_qhs_pcie1_cfg
+ &slv_qhs_a2_noc_cfg &slv_qhs_qdss_cfg
+ &slv_qhs_display_cfg &slv_qhs_pcie_modem_cfg
+ &slv_qhs_tcsr &slv_qhs_dcc_cfg
+ &slv_qhs_ddrss_cfg &slv_qhs_ipc_router
+ &slv_qns_cnoc_a2noc &slv_qhs_pcie0_cfg
+ &slv_qhs_cpr_mmcx &slv_qhs_npu_cfg
+ &slv_qhs_ahb2phy0 &slv_qhs_ahb2phy1
+ &slv_qhs_gpuss_cfg &slv_qhs_venus_cfg
+ &slv_qhs_tsif &slv_qhs_ipa
+ &slv_qhs_imem_cfg &slv_qhs_usb3_0
+ &slv_srvc_cnoc &slv_qhs_ufs_card_cfg
+ &slv_qhs_usb3_1 &slv_qhs_lpass_cfg
+ &slv_qhs_cpr_cx &slv_qhs_a1_noc_cfg
+ &slv_qhs_aoss &slv_qhs_prng
+ &slv_qhs_vsense_ctrl_cfg &slv_qhs_qspi
+ &slv_qhs_crypto0_cfg &slv_qhs_pimem_cfg
+ &slv_qhs_cpr_mx &slv_qhs_qup0
+ &slv_qhs_qup1 &slv_qhs_qup2
+ &slv_qhs_clk_ctl>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ mas_qhm_cnoc_dc_noc: mas-qhm-cnoc-dc-noc {
+ cell-id = <MSM_BUS_MASTER_CNOC_DC_NOC>;
+ label = "mas-qhm-cnoc-dc-noc";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_qhs_memnoc &slv_qhs_llcc>;
+ qcom,bus-dev = <&fab_dc_noc>;
+ };
+
+ mas_alm_gpu_tcu: mas-alm-gpu-tcu {
+ cell-id = <MSM_BUS_MASTER_GPU_TCU>;
+ label = "mas-alm-gpu-tcu";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <127>;
+ qcom,connections = <&slv_qns_llcc
+ &slv_qns_gem_noc_snoc>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ qcom,bcms = <&bcm_sh2>;
+ qcom,ap-owned;
+ qcom,prio = <6>;
+ };
+
+ mas_alm_sys_tcu: mas-alm-sys-tcu {
+ cell-id = <MSM_BUS_MASTER_SYS_TCU>;
+ label = "mas-alm-sys-tcu";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <128>;
+ qcom,connections = <&slv_qns_llcc
+ &slv_qns_gem_noc_snoc>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ qcom,bcms = <&bcm_sh2>;
+ qcom,ap-owned;
+ qcom,prio = <6>;
+ };
+
+ mas_chm_apps: mas-chm-apps {
+ cell-id = <MSM_BUS_MASTER_AMPSS_M0>;
+ label = "mas-chm-apps";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <2>;
+ qcom,connections = <&slv_qns_llcc
+ &slv_qns_gem_noc_snoc &slv_qns_sys_pcie>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ qcom,bcms = <&bcm_sh4>;
+ };
+
+ mas_qhm_gemnoc_cfg: mas-qhm-gemnoc-cfg {
+ cell-id = <MSM_BUS_MASTER_GEM_NOC_CFG>;
+ label = "mas-qhm-gemnoc-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_srvc_odd_gemnoc
+ &slv_srvc_even_gemnoc &slv_srvc_sys_gemnoc>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ };
+
+ mas_qnm_cmpnoc: mas-qnm-cmpnoc {
+ cell-id = <MSM_BUS_MASTER_COMPUTE_NOC>;
+ label = "mas-qnm-cmpnoc";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <2>;
+ qcom,qport = <0 64>;
+ qcom,connections = <&slv_qns_llcc
+ &slv_qns_gem_noc_snoc>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ qcom,bcms = <&bcm_sh3>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ qcom,forwarding;
+ };
+
+ mas_qnm_gpu: mas-qnm-gpu {
+ cell-id = <MSM_BUS_MASTER_GRAPHICS_3D>;
+ label = "mas-qnm-gpu";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <2>;
+ qcom,qport = <1 65>;
+ qcom,connections = <&slv_qns_llcc
+ &slv_qns_gem_noc_snoc>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ qcom,forwarding;
+ };
+
+ mas_qnm_mnoc_hf: mas-qnm-mnoc-hf {
+ cell-id = <MSM_BUS_MASTER_MNOC_HF_MEM_NOC>;
+ label = "mas-qnm-mnoc-hf";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <2>;
+ qcom,qport = <2 66>;
+ qcom,connections = <&slv_qns_llcc>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ qcom,forwarding;
+ };
+
+ mas_qnm_mnoc_sf: mas-qnm-mnoc-sf {
+ cell-id = <MSM_BUS_MASTER_MNOC_SF_MEM_NOC>;
+ label = "mas-qnm-mnoc-sf";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <2>;
+ qcom,qport = <3 67>;
+ qcom,connections = <&slv_qns_llcc
+ &slv_qns_gem_noc_snoc>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ qcom,forwarding;
+ };
+
+ mas_qnm_pcie: mas-qnm-pcie {
+ cell-id = <MSM_BUS_MASTER_GEM_NOC_PCIE_SNOC>;
+ label = "mas-qnm-pcie";
+ qcom,buswidth = <16>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <129>;
+ qcom,connections = <&slv_qns_llcc
+ &slv_qns_gem_noc_snoc>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ };
+
+ mas_qnm_snoc_gc: mas-qnm-snoc-gc {
+ cell-id = <MSM_BUS_MASTER_SNOC_GC_MEM_NOC>;
+ label = "mas-qnm-snoc-gc";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <130>;
+ qcom,connections = <&slv_qns_llcc>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ qcom,forwarding;
+ };
+
+ mas_qnm_snoc_sf: mas-qnm-snoc-sf {
+ cell-id = <MSM_BUS_MASTER_SNOC_SF_MEM_NOC>;
+ label = "mas-qnm-snoc-sf";
+ qcom,buswidth = <16>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <131>;
+ qcom,connections = <&slv_qns_llcc
+ &slv_qns_gem_noc_snoc &slv_qns_sys_pcie>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ qcom,forwarding;
+ };
+
+ mas_ipa_core_master: mas-ipa-core-master {
+ cell-id = <MSM_BUS_MASTER_IPA_CORE>;
+ label = "mas-ipa-core-master";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_ipa_core_slave>;
+ qcom,bus-dev = <&fab_ipa_virt>;
+ };
+
+ mas_llcc_mc: mas-llcc-mc {
+ cell-id = <MSM_BUS_MASTER_LLCC>;
+ label = "mas-llcc-mc";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <4>;
+ qcom,connections = <&slv_ebi>;
+ qcom,bus-dev = <&fab_mc_virt>;
+ };
+
+ mas_qhm_mnoc_cfg: mas-qhm-mnoc-cfg {
+ cell-id = <MSM_BUS_MASTER_CNOC_MNOC_CFG>;
+ label = "mas-qhm-mnoc-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_srvc_mnoc>;
+ qcom,bus-dev = <&fab_mmss_noc>;
+ };
+
+ mas_qnm_camnoc_hf: mas-qnm-camnoc-hf {
+ cell-id = <MSM_BUS_MASTER_CAMNOC_HF>;
+ label = "mas-qnm-camnoc-hf";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <2>;
+ qcom,qport = <4 5>;
+ qcom,connections = <&slv_qns_mem_noc_hf>;
+ qcom,bus-dev = <&fab_mmss_noc>;
+ qcom,bcms = <&bcm_mm1>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ qcom,forwarding;
+ };
+
+ mas_qnm_camnoc_icp: mas-qnm-camnoc-icp {
+ cell-id = <MSM_BUS_MASTER_CAMNOC_ICP>;
+ label = "mas-qnm-camnoc-icp";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <2>;
+ qcom,connections = <&slv_qns_mem_noc_sf>;
+ qcom,bus-dev = <&fab_mmss_noc>;
+ qcom,bcms = <&bcm_mm3>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ qcom,forwarding;
+ };
+
+ mas_qnm_camnoc_sf: mas-qnm-camnoc-sf {
+ cell-id = <MSM_BUS_MASTER_CAMNOC_SF>;
+ label = "mas-qnm-camnoc-sf";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <2>;
+ qcom,qport = <0 1>;
+ qcom,connections = <&slv_qns_mem_noc_sf>;
+ qcom,bus-dev = <&fab_mmss_noc>;
+ qcom,bcms = <&bcm_mm3>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ qcom,forwarding;
+ };
+
+ mas_qnm_video0: mas-qnm-video0 {
+ cell-id = <MSM_BUS_MASTER_VIDEO_P0>;
+ label = "mas-qnm-video0";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <12>;
+ qcom,connections = <&slv_qns_mem_noc_sf>;
+ qcom,bus-dev = <&fab_mmss_noc>;
+ qcom,bcms = <&bcm_mm3>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ qcom,forwarding;
+ };
+
+ mas_qnm_video1: mas-qnm-video1 {
+ cell-id = <MSM_BUS_MASTER_VIDEO_P1>;
+ label = "mas-qnm-video1";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <13>;
+ qcom,connections = <&slv_qns_mem_noc_sf>;
+ qcom,bus-dev = <&fab_mmss_noc>;
+ qcom,bcms = <&bcm_mm3>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ qcom,forwarding;
+ };
+
+ mas_qnm_video_cvp: mas-qnm-video-cvp {
+ cell-id = <MSM_BUS_MASTER_VIDEO_PROC>;
+ label = "mas-qnm-video-cvp";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <14>;
+ qcom,connections = <&slv_qns_mem_noc_sf>;
+ qcom,bus-dev = <&fab_mmss_noc>;
+ qcom,bcms = <&bcm_mm3>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ qcom,forwarding;
+ };
+
+ mas_qxm_mdp0: mas-qxm-mdp0 {
+ cell-id = <MSM_BUS_MASTER_MDP_PORT0>;
+ label = "mas-qxm-mdp0";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <6>;
+ qcom,connections = <&slv_qns_mem_noc_hf>;
+ qcom,bus-dev = <&fab_mmss_noc>;
+ qcom,bcms = <&bcm_mm1>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ qcom,forwarding;
+ };
+
+ mas_qxm_mdp1: mas-qxm-mdp1 {
+ cell-id = <MSM_BUS_MASTER_MDP_PORT1>;
+ label = "mas-qxm-mdp1";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <8>;
+ qcom,connections = <&slv_qns_mem_noc_hf>;
+ qcom,bus-dev = <&fab_mmss_noc>;
+ qcom,bcms = <&bcm_mm1>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ qcom,forwarding;
+ };
+
+ mas_qxm_rot: mas-qxm-rot {
+ cell-id = <MSM_BUS_MASTER_ROTATOR>;
+ label = "mas-qxm-rot";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <10>;
+ qcom,connections = <&slv_qns_mem_noc_sf>;
+ qcom,bus-dev = <&fab_mmss_noc>;
+ qcom,ap-owned;
+ qcom,prio = <0>;
+ qcom,forwarding;
+ };
+
+ mas_amm_npu_sys: mas-amm-npu-sys {
+ cell-id = <MSM_BUS_MASTER_NPU_SYS>;
+ label = "mas-amm-npu-sys";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <4>;
+ qcom,connections = <&slv_qns_npu_sys>;
+ qcom,bus-dev = <&fab_npu_noc>;
+ };
+
+ mas_amm_npu_sys_cdp_w: mas-amm-npu-sys-cdp-w {
+ cell-id = <MSM_BUS_MASTER_NPU_CDP>;
+ label = "mas-amm-npu-sys-cdp-w";
+ qcom,buswidth = <16>;
+ qcom,agg-ports = <2>;
+ qcom,connections = <&slv_qns_npu_sys>;
+ qcom,bus-dev = <&fab_npu_noc>;
+ };
+
+ mas_qhm_cfg: mas-qhm-cfg {
+ cell-id = <MSM_BUS_MASTER_NPU_NOC_CFG>;
+ label = "mas-qhm-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_srvc_noc
+ &slv_qhs_isense &slv_qhs_llm
+ &slv_qhs_dma_bwmon &slv_qhs_cp
+ &slv_qhs_tcm &slv_qhs_cal_dp0
+ &slv_qhs_cal_dp1 &slv_qhs_dpm>;
+ qcom,bus-dev = <&fab_npu_noc>;
+ };
+
+ mas_qhm_snoc_cfg: mas-qhm-snoc-cfg {
+ cell-id = <MSM_BUS_MASTER_SNOC_CFG>;
+ label = "mas-qhm-snoc-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_srvc_snoc>;
+ qcom,bus-dev = <&fab_system_noc>;
+ };
+
+ mas_qnm_aggre1_noc: mas-qnm-aggre1-noc {
+ cell-id = <MSM_BUS_A1NOC_SNOC_MAS>;
+ label = "mas-qnm-aggre1-noc";
+ qcom,buswidth = <16>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_qns_gemnoc_sf>;
+ qcom,bus-dev = <&fab_system_noc>;
+ qcom,bcms = <&bcm_sn7>;
+ };
+
+ mas_qnm_aggre2_noc: mas-qnm-aggre2-noc {
+ cell-id = <MSM_BUS_A2NOC_SNOC_MAS>;
+ label = "mas-qnm-aggre2-noc";
+ qcom,buswidth = <16>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_qns_gemnoc_sf>;
+ qcom,bus-dev = <&fab_system_noc>;
+ qcom,bcms = <&bcm_sn8>;
+ };
+
+ mas_qnm_gemnoc: mas-qnm-gemnoc {
+ cell-id = <MSM_BUS_MASTER_GEM_NOC_SNOC>;
+ label = "mas-qnm-gemnoc";
+ qcom,buswidth = <16>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_qxs_pimem
+ &slv_qxs_imem &slv_qhs_apss
+ &slv_qns_cnoc &slv_xs_sys_tcu_cfg
+ &slv_xs_qdss_stm>;
+ qcom,bus-dev = <&fab_system_noc>;
+ qcom,bcms = <&bcm_sn11>;
+ };
+
+ mas_qnm_gemnoc_pcie: mas-qnm-gemnoc-pcie {
+ cell-id = <MSM_BUS_MASTER_GEM_NOC_PCIE_SNOC>;
+ label = "mas-qnm-gemnoc-pcie";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,connections = <&slv_xs_pcie_modem
+ &slv_xs_pcie_0 &slv_xs_pcie_1>;
+ qcom,bus-dev = <&fab_system_noc>;
+ qcom,bcms = <&bcm_sn9>;
+ };
+
+ mas_qxm_pimem: mas-qxm-pimem {
+ cell-id = <MSM_BUS_MASTER_PIMEM>;
+ label = "mas-qxm-pimem";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <0>;
+ qcom,connections = <&slv_qns_gemnoc_gc>;
+ qcom,bus-dev = <&fab_system_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ qcom,forwarding;
+ };
+
+ mas_xm_gic: mas-xm-gic {
+ cell-id = <MSM_BUS_MASTER_GIC>;
+ label = "mas-xm-gic";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <1>;
+ qcom,connections = <&slv_qns_gemnoc_gc>;
+ qcom,bus-dev = <&fab_system_noc>;
+ qcom,ap-owned;
+ qcom,prio = <2>;
+ qcom,forwarding;
+ };
+
+ mas_qnm_mnoc_hf_display: mas-qnm-mnoc-hf_display {
+ cell-id = <MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY>;
+ label = "mas-qnm-mnoc-hf_display";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <2>;
+ qcom,qport = <2 66>;
+ qcom,connections = <&slv_qns_llcc_display>;
+ qcom,bus-dev = <&fab_gem_noc_display>;
+ };
+
+ mas_qnm_mnoc_sf_display: mas-qnm-mnoc-sf_display {
+ cell-id = <MSM_BUS_MASTER_MNOC_SF_MEM_NOC_DISPLAY>;
+ label = "mas-qnm-mnoc-sf_display";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <2>;
+ qcom,qport = <3 67>;
+ qcom,connections = <&slv_qns_llcc_display>;
+ qcom,bus-dev = <&fab_gem_noc_display>;
+ };
+
+ mas_llcc_mc_display: mas-llcc-mc_display {
+ cell-id = <MSM_BUS_MASTER_LLCC_DISPLAY>;
+ label = "mas-llcc-mc_display";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <4>;
+ qcom,connections = <&slv_ebi_display>;
+ qcom,bus-dev = <&fab_mc_virt_display>;
+ };
+
+ mas_qxm_mdp0_display: mas-qxm-mdp0_display {
+ cell-id = <MSM_BUS_MASTER_MDP_PORT0_DISPLAY>;
+ label = "mas-qxm-mdp0_display";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <6>;
+ qcom,connections = <&slv_qns_mem_noc_hf_display>;
+ qcom,bus-dev = <&fab_mmss_noc_display>;
+ qcom,bcms = <&bcm_mm1_display>;
+ };
+
+ mas_qxm_mdp1_display: mas-qxm-mdp1_display {
+ cell-id = <MSM_BUS_MASTER_MDP_PORT1_DISPLAY>;
+ label = "mas-qxm-mdp1_display";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <8>;
+ qcom,connections = <&slv_qns_mem_noc_hf_display>;
+ qcom,bus-dev = <&fab_mmss_noc_display>;
+ qcom,bcms = <&bcm_mm1_display>;
+ };
+
+ mas_qxm_rot_display: mas-qxm-rot_display {
+ cell-id = <MSM_BUS_MASTER_ROTATOR_DISPLAY>;
+ label = "mas-qxm-rot_display";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,qport = <10>;
+ qcom,connections = <&slv_qns_mem_noc_sf_display>;
+ qcom,bus-dev = <&fab_mmss_noc_display>;
+ };
+
+ /* Internal nodes */
+
+ /* Slaves */
+
+ slv_qns_a1noc_snoc:slv-qns-a1noc-snoc {
+ cell-id = <MSM_BUS_A1NOC_SNOC_SLV>;
+ label = "slv-qns-a1noc-snoc";
+ qcom,buswidth = <16>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_aggre1_noc>;
+ qcom,connections = <&mas_qnm_aggre1_noc>;
+ };
+
+ slv_qns_pcie_modem_mem_noc:slv-qns-pcie-modem-mem-noc {
+ cell-id = <MSM_BUS_SLAVE_ANOC_PCIE_GEM_NOC_1>;
+ label = "slv-qns-pcie-modem-mem-noc";
+ qcom,buswidth = <16>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_aggre1_noc>;
+ qcom,connections = <&mas_qnm_pcie>;
+ qcom,bcms = <&bcm_sn12>;
+ };
+
+ slv_srvc_aggre1_noc:slv-srvc-aggre1-noc {
+ cell-id = <MSM_BUS_SLAVE_SERVICE_A1NOC>;
+ label = "slv-srvc-aggre1-noc";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_aggre1_noc>;
+ };
+
+ slv_qns_a2noc_snoc:slv-qns-a2noc-snoc {
+ cell-id = <MSM_BUS_A2NOC_SNOC_SLV>;
+ label = "slv-qns-a2noc-snoc";
+ qcom,buswidth = <16>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_aggre2_noc>;
+ qcom,connections = <&mas_qnm_aggre2_noc>;
+ };
+
+ slv_qns_pcie_mem_noc:slv-qns-pcie-mem-noc {
+ cell-id = <MSM_BUS_SLAVE_ANOC_PCIE_GEM_NOC>;
+ label = "slv-qns-pcie-mem-noc";
+ qcom,buswidth = <16>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_aggre2_noc>;
+ qcom,connections = <&mas_qnm_pcie>;
+ qcom,bcms = <&bcm_sn12>;
+ };
+
+ slv_srvc_aggre2_noc:slv-srvc-aggre2-noc {
+ cell-id = <MSM_BUS_SLAVE_SERVICE_A2NOC>;
+ label = "slv-srvc-aggre2-noc";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_aggre2_noc>;
+ };
+
+ slv_qns_camnoc_uncomp:slv-qns-camnoc-uncomp {
+ cell-id = <MSM_BUS_SLAVE_CAMNOC_UNCOMP>;
+ label = "slv-qns-camnoc-uncomp";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_camnoc_virt>;
+ };
+
+ slv_qns_cdsp_mem_noc:slv-qns-cdsp-mem-noc {
+ cell-id = <MSM_BUS_SLAVE_CDSP_MEM_NOC>;
+ label = "slv-qns-cdsp-mem-noc";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <2>;
+ qcom,bus-dev = <&fab_compute_noc>;
+ qcom,connections = <&mas_qnm_cmpnoc>;
+ qcom,bcms = <&bcm_co0>;
+ };
+
+ slv_qhs_a1_noc_cfg:slv-qhs-a1-noc-cfg {
+ cell-id = <MSM_BUS_SLAVE_A1NOC_CFG>;
+ label = "slv-qhs-a1-noc-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,connections = <&mas_qhm_a1noc_cfg>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_a2_noc_cfg:slv-qhs-a2-noc-cfg {
+ cell-id = <MSM_BUS_SLAVE_A2NOC_CFG>;
+ label = "slv-qhs-a2-noc-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,connections = <&mas_qhm_a2noc_cfg>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_ahb2phy0:slv-qhs-ahb2phy0 {
+ cell-id = <MSM_BUS_SLAVE_AHB2PHY_SOUTH>;
+ label = "slv-qhs-ahb2phy0";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_ahb2phy1:slv-qhs-ahb2phy1 {
+ cell-id = <MSM_BUS_SLAVE_AHB2PHY_NORTH>;
+ label = "slv-qhs-ahb2phy1";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_aoss:slv-qhs-aoss {
+ cell-id = <MSM_BUS_SLAVE_AOSS>;
+ label = "slv-qhs-aoss";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_camera_cfg:slv-qhs-camera-cfg {
+ cell-id = <MSM_BUS_SLAVE_CAMERA_CFG>;
+ label = "slv-qhs-camera-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_clk_ctl:slv-qhs-clk-ctl {
+ cell-id = <MSM_BUS_SLAVE_CLK_CTL>;
+ label = "slv-qhs-clk-ctl";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_compute_dsp:slv-qhs-compute-dsp {
+ cell-id = <MSM_BUS_SLAVE_CDSP_CFG>;
+ label = "slv-qhs-compute-dsp";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_cpr_cx:slv-qhs-cpr-cx {
+ cell-id = <MSM_BUS_SLAVE_RBCPR_CX_CFG>;
+ label = "slv-qhs-cpr-cx";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_cpr_mmcx:slv-qhs-cpr-mmcx {
+ cell-id = <MSM_BUS_SLAVE_RBCPR_MMCX_CFG>;
+ label = "slv-qhs-cpr-mmcx";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_cpr_mx:slv-qhs-cpr-mx {
+ cell-id = <MSM_BUS_SLAVE_RBCPR_MX_CFG>;
+ label = "slv-qhs-cpr-mx";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_crypto0_cfg:slv-qhs-crypto0-cfg {
+ cell-id = <MSM_BUS_SLAVE_CRYPTO_0_CFG>;
+ label = "slv-qhs-crypto0-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_cx_rdpm:slv-qhs-cx-rdpm {
+ cell-id = <MSM_BUS_SLAVE_CX_RDPM>;
+ label = "slv-qhs-cx-rdpm";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_dcc_cfg:slv-qhs-dcc-cfg {
+ cell-id = <MSM_BUS_SLAVE_DCC_CFG>;
+ label = "slv-qhs-dcc-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_ddrss_cfg:slv-qhs-ddrss-cfg {
+ cell-id = <MSM_BUS_SLAVE_CNOC_DDRSS>;
+ label = "slv-qhs-ddrss-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,connections = <&mas_qhm_cnoc_dc_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_display_cfg:slv-qhs-display-cfg {
+ cell-id = <MSM_BUS_SLAVE_DISPLAY_CFG>;
+ label = "slv-qhs-display-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_gpuss_cfg:slv-qhs-gpuss-cfg {
+ cell-id = <MSM_BUS_SLAVE_GRAPHICS_3D_CFG>;
+ label = "slv-qhs-gpuss-cfg";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_imem_cfg:slv-qhs-imem-cfg {
+ cell-id = <MSM_BUS_SLAVE_IMEM_CFG>;
+ label = "slv-qhs-imem-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_ipa:slv-qhs-ipa {
+ cell-id = <MSM_BUS_SLAVE_IPA_CFG>;
+ label = "slv-qhs-ipa";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_ipc_router:slv-qhs-ipc-router {
+ cell-id = <MSM_BUS_SLAVE_IPC_ROUTER_CFG>;
+ label = "slv-qhs-ipc-router";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_lpass_cfg:slv-qhs-lpass-cfg {
+ cell-id = <MSM_BUS_SLAVE_LPASS>;
+ label = "slv-qhs-lpass-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_mnoc_cfg:slv-qhs-mnoc-cfg {
+ cell-id = <MSM_BUS_SLAVE_CNOC_MNOC_CFG>;
+ label = "slv-qhs-mnoc-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,connections = <&mas_qhm_mnoc_cfg>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_npu_cfg:slv-qhs-npu-cfg {
+ cell-id = <MSM_BUS_SLAVE_NPU_CFG>;
+ label = "slv-qhs-npu-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,connections = <&mas_qhm_cfg>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_pcie0_cfg:slv-qhs-pcie0-cfg {
+ cell-id = <MSM_BUS_SLAVE_PCIE_0_CFG>;
+ label = "slv-qhs-pcie0-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_pcie1_cfg:slv-qhs-pcie1-cfg {
+ cell-id = <MSM_BUS_SLAVE_PCIE_1_CFG>;
+ label = "slv-qhs-pcie1-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_pcie_modem_cfg:slv-qhs-pcie-modem-cfg {
+ cell-id = <MSM_BUS_SLAVE_PCIE_2_CFG>;
+ label = "slv-qhs-pcie-modem-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_pdm:slv-qhs-pdm {
+ cell-id = <MSM_BUS_SLAVE_PDM>;
+ label = "slv-qhs-pdm";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_pimem_cfg:slv-qhs-pimem-cfg {
+ cell-id = <MSM_BUS_SLAVE_PIMEM_CFG>;
+ label = "slv-qhs-pimem-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_prng:slv-qhs-prng {
+ cell-id = <MSM_BUS_SLAVE_PRNG>;
+ label = "slv-qhs-prng";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_qdss_cfg:slv-qhs-qdss-cfg {
+ cell-id = <MSM_BUS_SLAVE_QDSS_CFG>;
+ label = "slv-qhs-qdss-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_qspi:slv-qhs-qspi {
+ cell-id = <MSM_BUS_SLAVE_QSPI_0>;
+ label = "slv-qhs-qspi";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_qup0:slv-qhs-qup0 {
+ cell-id = <MSM_BUS_SLAVE_QUP_0>;
+ label = "slv-qhs-qup0";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_qup1:slv-qhs-qup1 {
+ cell-id = <MSM_BUS_SLAVE_QUP_1>;
+ label = "slv-qhs-qup1";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_qup2:slv-qhs-qup2 {
+ cell-id = <MSM_BUS_SLAVE_QUP_2>;
+ label = "slv-qhs-qup2";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_sdc2:slv-qhs-sdc2 {
+ cell-id = <MSM_BUS_SLAVE_SDCC_2>;
+ label = "slv-qhs-sdc2";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_sdc4:slv-qhs-sdc4 {
+ cell-id = <MSM_BUS_SLAVE_SDCC_4>;
+ label = "slv-qhs-sdc4";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_snoc_cfg:slv-qhs-snoc-cfg {
+ cell-id = <MSM_BUS_SLAVE_SNOC_CFG>;
+ label = "slv-qhs-snoc-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,connections = <&mas_qhm_snoc_cfg>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_tcsr:slv-qhs-tcsr {
+ cell-id = <MSM_BUS_SLAVE_TCSR>;
+ label = "slv-qhs-tcsr";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_tlmm0:slv-qhs-tlmm0 {
+ cell-id = <MSM_BUS_SLAVE_TLMM_NORTH>;
+ label = "slv-qhs-tlmm0";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_tlmm1:slv-qhs-tlmm1 {
+ cell-id = <MSM_BUS_SLAVE_TLMM_SOUTH>;
+ label = "slv-qhs-tlmm1";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_tlmm2:slv-qhs-tlmm2 {
+ cell-id = <MSM_BUS_SLAVE_TLMM_WEST>;
+ label = "slv-qhs-tlmm2";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_tsif:slv-qhs-tsif {
+ cell-id = <MSM_BUS_SLAVE_TSIF>;
+ label = "slv-qhs-tsif";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_ufs_card_cfg:slv-qhs-ufs-card-cfg {
+ cell-id = <MSM_BUS_SLAVE_UFS_CARD_CFG>;
+ label = "slv-qhs-ufs-card-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_ufs_mem_cfg:slv-qhs-ufs-mem-cfg {
+ cell-id = <MSM_BUS_SLAVE_UFS_MEM_CFG>;
+ label = "slv-qhs-ufs-mem-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_usb3_0:slv-qhs-usb3-0 {
+ cell-id = <MSM_BUS_SLAVE_USB3>;
+ label = "slv-qhs-usb3-0";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_usb3_1:slv-qhs-usb3-1 {
+ cell-id = <MSM_BUS_SLAVE_USB3_1>;
+ label = "slv-qhs-usb3-1";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_venus_cfg:slv-qhs-venus-cfg {
+ cell-id = <MSM_BUS_SLAVE_VENUS_CFG>;
+ label = "slv-qhs-venus-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_vsense_ctrl_cfg:slv-qhs-vsense-ctrl-cfg {
+ cell-id = <MSM_BUS_SLAVE_VSENSE_CTRL_CFG>;
+ label = "slv-qhs-vsense-ctrl-cfg";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qns_cnoc_a2noc:slv-qns-cnoc-a2noc {
+ cell-id = <MSM_BUS_SLAVE_CNOC_A2NOC>;
+ label = "slv-qns-cnoc-a2noc";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,connections = <&mas_qnm_cnoc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_srvc_cnoc:slv-srvc-cnoc {
+ cell-id = <MSM_BUS_SLAVE_SERVICE_CNOC>;
+ label = "slv-srvc-cnoc";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_config_noc>;
+ qcom,bcms = <&bcm_cn0>;
+ };
+
+ slv_qhs_llcc:slv-qhs-llcc {
+ cell-id = <MSM_BUS_SLAVE_LLCC_CFG>;
+ label = "slv-qhs-llcc";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_dc_noc>;
+ };
+
+ slv_qhs_memnoc:slv-qhs-memnoc {
+ cell-id = <MSM_BUS_SLAVE_GEM_NOC_CFG>;
+ label = "slv-qhs-memnoc";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_dc_noc>;
+ qcom,connections = <&mas_qhm_gemnoc_cfg>;
+ };
+
+ slv_qns_gem_noc_snoc:slv-qns-gem-noc-snoc {
+ cell-id = <MSM_BUS_SLAVE_GEM_NOC_SNOC>;
+ label = "slv-qns-gem-noc-snoc";
+ qcom,buswidth = <16>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ qcom,connections = <&mas_qnm_gemnoc>;
+ };
+
+ slv_qns_llcc:slv-qns-llcc {
+ cell-id = <MSM_BUS_SLAVE_LLCC>;
+ label = "slv-qns-llcc";
+ qcom,buswidth = <16>;
+ qcom,agg-ports = <4>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ qcom,connections = <&mas_llcc_mc>;
+ qcom,bcms = <&bcm_sh0>;
+ };
+
+ slv_qns_sys_pcie:slv-qns-sys-pcie {
+ cell-id = <MSM_BUS_SLAVE_MEM_NOC_PCIE_SNOC>;
+ label = "slv-qns-sys-pcie";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ qcom,connections = <&mas_qnm_gemnoc_pcie>;
+ };
+
+ slv_srvc_even_gemnoc:slv-srvc-even-gemnoc {
+ cell-id = <MSM_BUS_SLAVE_SERVICE_GEM_NOC_1>;
+ label = "slv-srvc-even-gemnoc";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ };
+
+ slv_srvc_odd_gemnoc:slv-srvc-odd-gemnoc {
+ cell-id = <MSM_BUS_SLAVE_SERVICE_GEM_NOC_2>;
+ label = "slv-srvc-odd-gemnoc";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ };
+
+ slv_srvc_sys_gemnoc:slv-srvc-sys-gemnoc {
+ cell-id = <MSM_BUS_SLAVE_SERVICE_GEM_NOC>;
+ label = "slv-srvc-sys-gemnoc";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_gem_noc>;
+ };
+
+ slv_ipa_core_slave:slv-ipa-core-slave {
+ cell-id = <MSM_BUS_SLAVE_IPA_CORE>;
+ label = "slv-ipa-core-slave";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_ipa_virt>;
+ qcom,bcms = <&bcm_ip0>;
+ };
+
+ slv_ebi:slv-ebi {
+ cell-id = <MSM_BUS_SLAVE_EBI_CH0>;
+ label = "slv-ebi";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <4>;
+ qcom,bus-dev = <&fab_mc_virt>;
+ qcom,bcms = <&bcm_mc0>, <&bcm_acv>;
+ };
+
+ slv_qns_mem_noc_hf:slv-qns-mem-noc-hf {
+ cell-id = <MSM_BUS_SLAVE_MNOC_HF_MEM_NOC>;
+ label = "slv-qns-mem-noc-hf";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <2>;
+ qcom,bus-dev = <&fab_mmss_noc>;
+ qcom,connections = <&mas_qnm_mnoc_hf>;
+ qcom,bcms = <&bcm_mm0>;
+ };
+
+ slv_qns_mem_noc_sf:slv-qns-mem-noc-sf {
+ cell-id = <MSM_BUS_SLAVE_MNOC_SF_MEM_NOC>;
+ label = "slv-qns-mem-noc-sf";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <2>;
+ qcom,bus-dev = <&fab_mmss_noc>;
+ qcom,connections = <&mas_qnm_mnoc_sf>;
+ qcom,bcms = <&bcm_mm2>;
+ };
+
+ slv_srvc_mnoc:slv-srvc-mnoc {
+ cell-id = <MSM_BUS_SLAVE_SERVICE_MNOC>;
+ label = "slv-srvc-mnoc";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_mmss_noc>;
+ };
+
+ slv_qhs_cal_dp0:slv-qhs-cal-dp0 {
+ cell-id = <MSM_BUS_SLAVE_NPU_CAL_DP0>;
+ label = "slv-qhs-cal-dp0";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_npu_noc>;
+ };
+
+ slv_qhs_cal_dp1:slv-qhs-cal-dp1 {
+ cell-id = <MSM_BUS_SLAVE_NPU_CAL_DP1>;
+ label = "slv-qhs-cal-dp1";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_npu_noc>;
+ };
+
+ slv_qhs_cp:slv-qhs-cp {
+ cell-id = <MSM_BUS_SLAVE_NPU_CP>;
+ label = "slv-qhs-cp";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_npu_noc>;
+ };
+
+ slv_qhs_dma_bwmon:slv-qhs-dma-bwmon {
+ cell-id = <MSM_BUS_SLAVE_NPU_INT_DMA_BWMON_CFG>;
+ label = "slv-qhs-dma-bwmon";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_npu_noc>;
+ };
+
+ slv_qhs_dpm:slv-qhs-dpm {
+ cell-id = <MSM_BUS_SLAVE_NPU_DPM>;
+ label = "slv-qhs-dpm";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_npu_noc>;
+ };
+
+ slv_qhs_isense:slv-qhs-isense {
+ cell-id = <MSM_BUS_SLAVE_ISENSE_CFG>;
+ label = "slv-qhs-isense";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_npu_noc>;
+ };
+
+ slv_qhs_llm:slv-qhs-llm {
+ cell-id = <MSM_BUS_SLAVE_NPU_LLM_CFG>;
+ label = "slv-qhs-llm";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_npu_noc>;
+ };
+
+ slv_qhs_tcm:slv-qhs-tcm {
+ cell-id = <MSM_BUS_SLAVE_NPU_TCM>;
+ label = "slv-qhs-tcm";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_npu_noc>;
+ };
+
+ slv_qns_npu_sys:slv-qns-npu-sys {
+ cell-id = <MSM_BUS_SLAVE_NPU_COMPUTE_NOC>;
+ label = "slv-qns-npu-sys";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <2>;
+ qcom,bus-dev = <&fab_npu_noc>;
+ };
+
+ slv_srvc_noc:slv-srvc-noc {
+ cell-id = <MSM_BUS_SLAVE_SERVICE_NPU_NOC>;
+ label = "slv-srvc-noc";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_npu_noc>;
+ };
+
+ slv_qhs_apss:slv-qhs-apss {
+ cell-id = <MSM_BUS_SLAVE_APPSS>;
+ label = "slv-qhs-apss";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_system_noc>;
+ };
+
+ slv_qns_cnoc:slv-qns-cnoc {
+ cell-id = <MSM_BUS_SNOC_CNOC_SLV>;
+ label = "slv-qns-cnoc";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_system_noc>;
+ qcom,connections = <&mas_qnm_snoc>;
+ };
+
+ slv_qns_gemnoc_gc:slv-qns-gemnoc-gc {
+ cell-id = <MSM_BUS_SLAVE_SNOC_GEM_NOC_GC>;
+ label = "slv-qns-gemnoc-gc";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_system_noc>;
+ qcom,connections = <&mas_qnm_snoc_gc>;
+ qcom,bcms = <&bcm_sn2>;
+ };
+
+ slv_qns_gemnoc_sf:slv-qns-gemnoc-sf {
+ cell-id = <MSM_BUS_SLAVE_SNOC_GEM_NOC_SF>;
+ label = "slv-qns-gemnoc-sf";
+ qcom,buswidth = <16>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_system_noc>;
+ qcom,connections = <&mas_qnm_snoc_sf>;
+ qcom,bcms = <&bcm_sn0>;
+ };
+
+ slv_qxs_imem:slv-qxs-imem {
+ cell-id = <MSM_BUS_SLAVE_OCIMEM>;
+ label = "slv-qxs-imem";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_system_noc>;
+ qcom,bcms = <&bcm_sn1>;
+ };
+
+ slv_qxs_pimem:slv-qxs-pimem {
+ cell-id = <MSM_BUS_SLAVE_PIMEM>;
+ label = "slv-qxs-pimem";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_system_noc>;
+ qcom,bcms = <&bcm_sn3>;
+ };
+
+ slv_srvc_snoc:slv-srvc-snoc {
+ cell-id = <MSM_BUS_SLAVE_SERVICE_SNOC>;
+ label = "slv-srvc-snoc";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_system_noc>;
+ };
+
+ slv_xs_pcie_0:slv-xs-pcie-0 {
+ cell-id = <MSM_BUS_SLAVE_PCIE_0>;
+ label = "slv-xs-pcie-0";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_system_noc>;
+ qcom,bcms = <&bcm_sn6>;
+ };
+
+ slv_xs_pcie_1:slv-xs-pcie-1 {
+ cell-id = <MSM_BUS_SLAVE_PCIE_1>;
+ label = "slv-xs-pcie-1";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_system_noc>;
+ qcom,bcms = <&bcm_sn6>;
+ };
+
+ slv_xs_pcie_modem:slv-xs-pcie-modem {
+ cell-id = <MSM_BUS_SLAVE_PCIE_2>;
+ label = "slv-xs-pcie-modem";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_system_noc>;
+ qcom,bcms = <&bcm_sn5>;
+ };
+
+ slv_xs_qdss_stm:slv-xs-qdss-stm {
+ cell-id = <MSM_BUS_SLAVE_QDSS_STM>;
+ label = "slv-xs-qdss-stm";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_system_noc>;
+ qcom,bcms = <&bcm_sn4>;
+ };
+
+ slv_xs_sys_tcu_cfg:slv-xs-sys-tcu-cfg {
+ cell-id = <MSM_BUS_SLAVE_TCU>;
+ label = "slv-xs-sys-tcu-cfg";
+ qcom,buswidth = <8>;
+ qcom,agg-ports = <1>;
+ qcom,bus-dev = <&fab_system_noc>;
+ };
+
+ slv_qns_llcc_display:slv-qns-llcc_display {
+ cell-id = <MSM_BUS_SLAVE_LLCC_DISPLAY>;
+ label = "slv-qns-llcc_display";
+ qcom,buswidth = <16>;
+ qcom,agg-ports = <4>;
+ qcom,bus-dev = <&fab_gem_noc_display>;
+ qcom,connections = <&mas_llcc_mc_display>;
+ qcom,bcms = <&bcm_sh0_display>;
+ };
+
+ slv_ebi_display:slv-ebi_display {
+ cell-id = <MSM_BUS_SLAVE_EBI_CH0_DISPLAY>;
+ label = "slv-ebi_display";
+ qcom,buswidth = <4>;
+ qcom,agg-ports = <4>;
+ qcom,bus-dev = <&fab_mc_virt_display>;
+ qcom,bcms = <&bcm_mc0_display>, <&bcm_acv_display>;
+ };
+
+ slv_qns_mem_noc_hf_display:slv-qns-mem-noc-hf_display {
+ cell-id = <MSM_BUS_SLAVE_MNOC_HF_MEM_NOC_DISPLAY>;
+ label = "slv-qns-mem-noc-hf_display";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <2>;
+ qcom,bus-dev = <&fab_mmss_noc_display>;
+ qcom,connections = <&mas_qnm_mnoc_hf_display>;
+ qcom,bcms = <&bcm_mm0_display>;
+ };
+
+ slv_qns_mem_noc_sf_display:slv-qns-mem-noc-sf_display {
+ cell-id = <MSM_BUS_SLAVE_MNOC_SF_MEM_NOC_DISPLAY>;
+ label = "slv-qns-mem-noc-sf_display";
+ qcom,buswidth = <32>;
+ qcom,agg-ports = <2>;
+ qcom,bus-dev = <&fab_mmss_noc_display>;
+ qcom,connections = <&mas_qnm_mnoc_sf_display>;
+ qcom,bcms = <&bcm_mm2_display>;
+ };
+ };
+};
+
diff --git a/arch/arm64/boot/dts/qcom/kona-ion.dtsi b/arch/arm64/boot/dts/qcom/kona-ion.dtsi
index 4a57f8f..b21d5e8 100644
--- a/arch/arm64/boot/dts/qcom/kona-ion.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-ion.dtsi
@@ -9,8 +9,8 @@
#address-cells = <1>;
#size-cells = <0>;
- system_heap: qcom,ion-heap@19 {
- reg = <0x19>;
+ system_heap: qcom,ion-heap@25 {
+ reg = <0x25>;
qcom,ion-heap-type = "SYSTEM";
};
diff --git a/arch/arm64/boot/dts/qcom/kona-rumi.dtsi b/arch/arm64/boot/dts/qcom/kona-rumi.dtsi
index a077e1b..0c0a68f 100644
--- a/arch/arm64/boot/dts/qcom/kona-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-rumi.dtsi
@@ -26,7 +26,7 @@
limit-tx-hs-gear = <1>;
limit-rx-hs-gear = <1>;
- vdd-hba-supply = <&pm8150_s4>;
+ vdd-hba-supply = <&ufs_phy_gdsc>;
vdd-hba-fixed-regulator;
vcc-supply = <&pm8150_l17>;
vccq2-supply = <&pm8150_s4>;
@@ -41,3 +41,36 @@
spm-level = <0>;
status = "ok";
};
+
+&soc {
+ usb_emu_phy: usb_emu_phy@a720000 {
+ compatible = "qcom,usb-emu-phy";
+ reg = <0x0a720000 0x9500>,
+ <0x0a6f8800 0x100>;
+ reg-names = "base", "qscratch_base";
+
+ qcom,emu-init-seq = <0xfff0 0x4
+ 0xfff3 0x4
+ 0x40 0x4
+ 0xfff3 0x4
+ 0xfff0 0x4
+ 0x100000 0x20
+ 0x0 0x20
+ 0x1a0 0x20
+ 0x100000 0x3c
+ 0x0 0x3c
+ 0x10060 0x3c
+ 0x0 0x4>;
+ };
+
+ usb_nop_phy: usb_nop_phy {
+ compatible = "usb-nop-xceiv";
+ };
+};
+
+&usb0 {
+ dwc3@a600000 {
+ usb-phy = <&usb_emu_phy>, <&usb_nop_phy>;
+ maximum-speed = "high-speed";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-usb.dtsi b/arch/arm64/boot/dts/qcom/kona-usb.dtsi
new file mode 100644
index 0000000..81089fe
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-usb.dtsi
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/clock/qcom,gcc-kona.h>
+
+&soc {
+ /* Primary USB port related controller */
+ usb0: ssusb@a600000 {
+ compatible = "qcom,dwc-usb3-msm";
+ reg = <0x0a600000 0x100000>;
+ reg-names = "core_base";
+
+ iommus = <&apps_smmu 0x0 0x0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ interrupts = <0 494 0>, <0 130 0>, <0 497 0>, <0 495 0>;
+ interrupt-names = "dp_hs_phy_irq", "pwr_event_irq",
+ "ss_phy_irq", "dm_hs_phy_irq";
+ qcom,use-pdc-interrupts;
+
+ USB3_GDSC-supply = <&usb30_prim_gdsc>;
+ clocks = <&clock_gcc GCC_USB30_PRIM_MASTER_CLK>,
+ <&clock_gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
+ <&clock_gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
+ <&clock_gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+ <&clock_gcc GCC_USB30_PRIM_SLEEP_CLK>,
+ /*
+ * GCC_USB3_SEC_CLKREF_EN provides ref_clk for both
+ * USB instances.
+ */
+ <&clock_gcc GCC_USB3_SEC_CLKREF_EN>;
+ clock-names = "core_clk", "iface_clk", "bus_aggr_clk",
+ "utmi_clk", "sleep_clk", "xo";
+
+ resets = <&clock_gcc GCC_USB30_PRIM_BCR>;
+ reset-names = "core_reset";
+
+ qcom,core-clk-rate = <200000000>;
+ qcom,core-clk-rate-hs = <66666667>;
+ qcom,num-gsi-evt-buffs = <0x3>;
+ qcom,gsi-reg-offset =
+ <0x0fc /* GSI_GENERAL_CFG */
+ 0x110 /* GSI_DBL_ADDR_L */
+ 0x120 /* GSI_DBL_ADDR_H */
+ 0x130 /* GSI_RING_BASE_ADDR_L */
+ 0x144 /* GSI_RING_BASE_ADDR_H */
+ 0x1a4>; /* GSI_IF_STS */
+ qcom,dwc-usb3-msm-tx-fifo-size = <27696>;
+
+ dwc3@a600000 {
+ compatible = "snps,dwc3";
+ reg = <0x0a600000 0xcd00>;
+ interrupts = <0 133 0>;
+ linux,sysdev_is_parent;
+ snps,disable-clk-gating;
+ snps,has-lpm-erratum;
+ snps,hird-threshold = /bits/ 8 <0x10>;
+ snps,ssp-u3-u0-quirk;
+ snps,usb3-u1u2-disable;
+ usb-core-id = <0>;
+ tx-fifo-resize;
+ maximum-speed = "super-speed-plus";
+ dr_mode = "drd";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/kona.dtsi b/arch/arm64/boot/dts/qcom/kona.dtsi
index eaf1679..cbd0a4e 100644
--- a/arch/arm64/boot/dts/qcom/kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona.dtsi
@@ -15,6 +15,7 @@
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/clock/qcom,videocc-kona.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
#include <dt-bindings/soc/qcom,ipcc.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
@@ -462,7 +463,7 @@
};
};
-qcom,msm-imem@146bf000 {
+ qcom,msm-imem@146bf000 {
compatible = "qcom,msm-imem";
reg = <0x146bf000 0x1000>;
ranges = <0x0 0x146bf000 0x1000>;
@@ -612,6 +613,244 @@
#clock-cells = <1>;
};
+ /* GCC GDSCs */
+ pcie_0_gdsc: qcom,gdsc@16b004 {
+ compatible = "qcom,gdsc";
+ reg = <0x16b004 0x4>;
+ regulator-name = "pcie_0_gdsc";
+ };
+
+ pcie_1_gdsc: qcom,gdsc@18d004 {
+ compatible = "qcom,gdsc";
+ reg = <0x18d004 0x4>;
+ regulator-name = "pcie_1_gdsc";
+ };
+
+ pcie_2_gdsc: qcom,gdsc@106004 {
+ compatible = "qcom,gdsc";
+ reg = <0x106004 0x4>;
+ regulator-name = "pcie_2_gdsc";
+ };
+
+ ufs_card_gdsc: qcom,gdsc@175004 {
+ compatible = "qcom,gdsc";
+ reg = <0x175004 0x4>;
+ regulator-name = "ufs_card_gdsc";
+ };
+
+ ufs_phy_gdsc: qcom,gdsc@177004 {
+ compatible = "qcom,gdsc";
+ reg = <0x177004 0x4>;
+ regulator-name = "ufs_phy_gdsc";
+ };
+
+ usb30_prim_gdsc: qcom,gdsc@10f004 {
+ compatible = "qcom,gdsc";
+ reg = <0x10f004 0x4>;
+ regulator-name = "usb30_prim_gdsc";
+ };
+
+ usb30_sec_gdsc: qcom,gdsc@110004 {
+ compatible = "qcom,gdsc";
+ reg = <0x110004 0x4>;
+ regulator-name = "usb30_sec_gdsc";
+ };
+
+ hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc: qcom,gdsc@17d050 {
+ compatible = "qcom,gdsc";
+ reg = <0x17d050 0x4>;
+ regulator-name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc";
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <500>;
+ };
+
+ hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc: qcom,gdsc@17d058 {
+ compatible = "qcom,gdsc";
+ reg = <0x17d058 0x4>;
+ regulator-name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc";
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <500>;
+ };
+
+ hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc: qcom,gdsc@17d054 {
+ compatible = "qcom,gdsc";
+ reg = <0x17d054 0x4>;
+ regulator-name = "hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc";
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <500>;
+ };
+
+ hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc: qcom,gdsc@17d06c {
+ compatible = "qcom,gdsc";
+ reg = <0x17d06c 0x4>;
+ regulator-name = "hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc";
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <500>;
+ };
+
+ /* CAM_CC GDSCs */
+ bps_gdsc: qcom,gdsc@ad07004 {
+ compatible = "qcom,gdsc";
+ reg = <0xad07004 0x4>;
+ regulator-name = "bps_gdsc";
+ clock-names = "ahb_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
+ parent-supply = <&VDD_MMCX_LEVEL>;
+ vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+ qcom,support-hw-trigger;
+ };
+
+ ife_0_gdsc: qcom,gdsc@ad0a004 {
+ compatible = "qcom,gdsc";
+ reg = <0xad0a004 0x4>;
+ regulator-name = "ife_0_gdsc";
+ clock-names = "ahb_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
+ parent-supply = <&VDD_MMCX_LEVEL>;
+ vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+ };
+
+ ife_1_gdsc: qcom,gdsc@ad0b004 {
+ compatible = "qcom,gdsc";
+ reg = <0xad0b004 0x4>;
+ regulator-name = "ife_1_gdsc";
+ clock-names = "ahb_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
+ parent-supply = <&VDD_MMCX_LEVEL>;
+ vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+ };
+
+ ipe_0_gdsc: qcom,gdsc@ad08004 {
+ compatible = "qcom,gdsc";
+ reg = <0xad08004 0x4>;
+ regulator-name = "ipe_0_gdsc";
+ clock-names = "ahb_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
+ parent-supply = <&VDD_MMCX_LEVEL>;
+ vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+ qcom,support-hw-trigger;
+ };
+
+ sbi_gdsc: qcom,gdsc@ad09004 {
+ compatible = "qcom,gdsc";
+ reg = <0xad09004 0x4>;
+ regulator-name = "sbi_gdsc";
+ clock-names = "ahb_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
+ parent-supply = <&VDD_MMCX_LEVEL>;
+ vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+ };
+
+ titan_top_gdsc: qcom,gdsc@ad0c144 {
+ compatible = "qcom,gdsc";
+ reg = <0xad0c144 0x4>;
+ regulator-name = "titan_top_gdsc";
+ clock-names = "ahb_clk";
+ clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>;
+ parent-supply = <&VDD_MMCX_LEVEL>;
+ vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+ };
+
+ /* DISP_CC GDSC */
+ mdss_core_gdsc: qcom,gdsc@af03000 {
+ compatible = "qcom,gdsc";
+ reg = <0xaf03000 0x4>;
+ regulator-name = "mdss_core_gdsc";
+ clock-names = "ahb_clk";
+ clocks = <&clock_gcc GCC_DISP_AHB_CLK>;
+ parent-supply = <&VDD_MMCX_LEVEL>;
+ vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+ qcom,support-hw-trigger;
+ };
+
+ /* GPU_CC GDSCs */
+ gpu_cx_hw_ctrl: syscon@3d91540 {
+ compatible = "syscon";
+ reg = <0x3d91540 0x4>;
+ };
+
+ gpu_cx_gdsc: qcom,gdsc@3d9106c {
+ compatible = "qcom,gdsc";
+ reg = <0x3d9106c 0x4>;
+ regulator-name = "gpu_cx_gdsc";
+ hw-ctrl-addr = <&gpu_cx_hw_ctrl>;
+ parent-supply = <&VDD_CX_LEVEL>;
+ qcom,no-status-check-on-disable;
+ qcom,clk-dis-wait-val = <8>;
+ qcom,gds-timeout = <500>;
+ };
+
+ gpu_gx_domain_addr: syscon@3d91508 {
+ compatible = "syscon";
+ reg = <0x3d91508 0x4>;
+ };
+
+ gpu_gx_sw_reset: syscon@3d91008 {
+ compatible = "syscon";
+ reg = <0x3d91008 0x4>;
+ };
+
+ gpu_gx_gdsc: qcom,gdsc@3d9100c {
+ compatible = "qcom,gdsc";
+ reg = <0x3d9100c 0x4>;
+ regulator-name = "gpu_gx_gdsc";
+ domain-addr = <&gpu_gx_domain_addr>;
+ sw-reset = <&gpu_gx_sw_reset>;
+ parent-supply = <&VDD_GFX_LEVEL>;
+ vdd_parent-supply = <&VDD_GFX_LEVEL>;
+ qcom,reset-aon-logic;
+ };
+
+ /* NPU GDSC */
+ npu_core_gdsc: qcom,gdsc@9981004 {
+ compatible = "qcom,gdsc";
+ reg = <0x9981004 0x4>;
+ regulator-name = "npu_core_gdsc";
+ clock-names = "ahb_clk";
+ clocks = <&clock_gcc GCC_NPU_CFG_AHB_CLK>;
+ };
+
+ /* VIDEO_CC GDSCs */
+ mvs0_gdsc: qcom,gdsc@abf0d18 {
+ compatible = "qcom,gdsc";
+ reg = <0xabf0d18 0x4>;
+ regulator-name = "mvs0_gdsc";
+ clock-names = "ahb_clk";
+ clocks = <&clock_gcc GCC_VIDEO_AHB_CLK>;
+ parent-supply = <&VDD_MMCX_LEVEL>;
+ vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+ };
+
+ mvs0c_gdsc: qcom,gdsc@abf0bf8 {
+ compatible = "qcom,gdsc";
+ reg = <0xabf0bf8 0x4>;
+ regulator-name = "mvs0c_gdsc";
+ clock-names = "ahb_clk";
+ clocks = <&clock_gcc GCC_VIDEO_AHB_CLK>;
+ parent-supply = <&VDD_MMCX_LEVEL>;
+ vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+ };
+
+ mvs1_gdsc: qcom,gdsc@abf0d98 {
+ compatible = "qcom,gdsc";
+ reg = <0xabf0d98 0x4>;
+ regulator-name = "mvs1_gdsc";
+ clock-names = "ahb_clk";
+ clocks = <&clock_gcc GCC_VIDEO_AHB_CLK>;
+ parent-supply = <&VDD_MMCX_LEVEL>;
+ vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+ };
+
+ mvs1c_gdsc: qcom,gdsc@abf0c98 {
+ compatible = "qcom,gdsc";
+ reg = <0xabf0c98 0x4>;
+ regulator-name = "mvs1c_gdsc";
+ clock-names = "ahb_clk";
+ clocks = <&clock_gcc GCC_VIDEO_AHB_CLK>;
+ parent-supply = <&VDD_MMCX_LEVEL>;
+ vdd_parent-supply = <&VDD_MMCX_LEVEL>;
+ };
+
ufsphy_mem: ufsphy_mem@1d87000 {
reg = <0x1d87000 0xe00>; /* PHY regs */
reg-names = "phy_mem";
@@ -770,6 +1009,11 @@
<WAKE_TCS 3>,
<CONTROL_TCS 1>;
status = "disabled";
+
+ msm_bus_apps_rsc {
+ compatible = "qcom,msm-bus-rsc";
+ qcom,msm-bus-id = <MSM_BUS_RSC_APPS>;
+ };
};
disp_rsc: rsc@af20000 {
@@ -803,9 +1047,19 @@
memory-region = <&smem_mem>;
hwlocks = <&tcsr_mutex 3>;
};
+
+ kryo-erp {
+ compatible = "arm,arm64-kryo-cpu-erp";
+ interrupts = <GIC_PPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "l1-l2-faultirq",
+ "l3-scu-faultirq";
+ };
};
+#include "kona-bus.dtsi"
#include "kona-ion.dtsi"
#include "msm-arm-smmu-kona.dtsi"
#include "kona-pinctrl.dtsi"
#include "kona-smp2p.dtsi"
+#include "kona-usb.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
index dbf96f7..530a1a6 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
@@ -19,6 +19,8 @@
#size-cells = <1>;
#address-cells = <1>;
ranges;
+ qcom,regulator-names = "vdd";
+ vdd-supply = <&gpu_cx_gdsc>;
interrupts = <GIC_SPI 673 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 674 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 682 IRQ_TYPE_LEVEL_HIGH>,
@@ -34,7 +36,7 @@
compatible = "qcom,qsmmuv500-tbu";
reg = <0x3DC5000 0x1000>,
<0x3DC2200 0x8>;
- reg-names = "base", "status";
+ reg-names = "base", "status-reg";
qcom,stream-id-range = <0x0 0x400>;
};
@@ -42,7 +44,7 @@
compatible = "qcom,qsmmuv500-tbu";
reg = <0x3DC9000 0x1000>,
<0x3DC2208 0x8>;
- reg-names = "base", "status";
+ reg-names = "base", "status-reg";
qcom,stream-id-range = <0x400 0x400>;
};
};
@@ -180,6 +182,8 @@
<0x15182210 0x8>;
reg-names = "base", "status-reg";
qcom,stream-id-range = <0x800 0x400>;
+ qcom,regulator-names = "vdd";
+ vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc>;
};
mnoc_hf_1_tbu: mnoc_hf_1_tbu@15191000 {
@@ -188,7 +192,8 @@
<0x15182218 0x8>;
reg-names = "base", "status-reg";
qcom,stream-id-range = <0xc00 0x400>;
-
+ qcom,regulator-names = "vdd";
+ vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc>;
};
compute_dsp_1_tbu: compute_dsp_1_tbu@15195000 {
@@ -229,6 +234,8 @@
<0x15182240 0x8>;
reg-names = "base", "status-reg";
qcom,stream-id-range = <0x2000 0x400>;
+ qcom,regulator-names = "vdd";
+ vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc>;
};
mnoc_sf_1_tbu: mnoc_sf_1_tbu@151a9000 {
@@ -237,6 +244,8 @@
<0x15182248 0x8>;
reg-names = "base", "status-reg";
qcom,stream-id-range = <0x2400 0x400>;
+ qcom,regulator-names = "vdd";
+ vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc>;
};
};
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index de26564..7948353 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -305,6 +305,7 @@
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
CONFIG_USB_ISP1760=y
CONFIG_USB_ISP1760_HOST_ROLE=y
CONFIG_NOP_USB_XCEIV=y
@@ -315,6 +316,7 @@
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_MMC=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_TEST=y
@@ -350,6 +352,9 @@
CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_IIO=y
@@ -394,6 +399,7 @@
CONFIG_PANIC_TIMEOUT=5
CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_IPC_LOGGING=y
CONFIG_DEBUG_ALIGN_RODATA=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 357e86c..0204cb9 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -313,6 +313,7 @@
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
CONFIG_USB_ISP1760=y
CONFIG_USB_ISP1760_HOST_ROLE=y
CONFIG_NOP_USB_XCEIV=y
@@ -323,6 +324,7 @@
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_MMC=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_TEST=y
@@ -330,6 +332,9 @@
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_EDAC=y
+CONFIG_EDAC_KRYO_ARM64=y
+CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE=y
+CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y
CONFIG_EDAC_QCOM=y
CONFIG_RTC_CLASS=y
CONFIG_DMADEVICES=y
@@ -359,6 +364,9 @@
CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_MSM_CORE_HANG_DETECT=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
@@ -438,6 +446,7 @@
CONFIG_FAIL_PAGE_ALLOC=y
CONFIG_FAULT_INJECTION_DEBUG_FS=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
CONFIG_QCOM_RTB=y
CONFIG_QCOM_RTB_SEPARATE_CPUS=y
CONFIG_FUNCTION_TRACER=y
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index d894a20..4d37e51 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -29,6 +29,7 @@
#include <linux/arm-smccc.h>
#include <linux/kprobes.h>
+#include <asm/cacheflush.h>
#include <asm/checksum.h>
EXPORT_SYMBOL(copy_page);
@@ -83,3 +84,8 @@
EXPORT_SYMBOL(__ashrti3);
extern long long __lshrti3(long long a, int b);
EXPORT_SYMBOL(__lshrti3);
+
+ /* caching functions */
+EXPORT_SYMBOL(__dma_inv_area);
+EXPORT_SYMBOL(__dma_clean_area);
+EXPORT_SYMBOL(__dma_flush_area);
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 159317e..035b2af 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -931,12 +931,17 @@
unsigned long attrs)
{
bool coherent = is_dma_coherent(dev, attrs);
+ int ret;
+
+ ret = iommu_dma_map_sg(dev, sgl, nelems,
+ dma_info_to_prot(dir, coherent, attrs));
+ if (!ret)
+ return ret;
if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
- return iommu_dma_map_sg(dev, sgl, nelems,
- dma_info_to_prot(dir, coherent, attrs));
+ return ret;
}
static void __iommu_unmap_sg_attrs(struct device *dev,
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 04bbcd7..9a802c0 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1658,7 +1658,7 @@
struct device_node *of_node = dev_of_node(dev);
int error;
- if (of_node) {
+ if (of_node && of_node_kobj(of_node)) {
error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
if (error)
dev_warn(dev, "Error %d creating of_node link\n",error);
diff --git a/drivers/bus/mhi/devices/mhi_netdev.c b/drivers/bus/mhi/devices/mhi_netdev.c
index 6e65e46..8237de0 100644
--- a/drivers/bus/mhi/devices/mhi_netdev.c
+++ b/drivers/bus/mhi/devices/mhi_netdev.c
@@ -80,13 +80,6 @@
u32 alloc_failed;
};
-/* important: do not exceed sk_buf->cb (48 bytes) */
-struct mhi_skb_priv {
- void *buf;
- size_t size;
- struct mhi_netdev *mhi_netdev;
-};
-
struct mhi_netdev {
int alias;
struct mhi_device *mhi_dev;
@@ -140,7 +133,6 @@
{
u32 cur_mru = mhi_netdev->mru;
struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
- struct mhi_skb_priv *skb_priv;
int ret;
struct sk_buff *skb;
int no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
@@ -158,15 +150,11 @@
goto error_queue;
}
- skb_priv = (struct mhi_skb_priv *)skb->cb;
- skb_priv->buf = skb->data;
- skb_priv->size = cur_mru;
- skb_priv->mhi_netdev = mhi_netdev;
skb->dev = mhi_netdev->ndev;
spin_lock_bh(&mhi_netdev->rx_lock);
- ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, skb,
- skb_priv->size, MHI_EOT);
+ ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, skb, cur_mru,
+ MHI_EOT);
spin_unlock_bh(&mhi_netdev->rx_lock);
if (ret) {
@@ -301,12 +289,9 @@
struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
int res = 0;
- struct mhi_skb_priv *tx_priv;
MSG_VERB("Entered\n");
- tx_priv = (struct mhi_skb_priv *)(skb->cb);
- tx_priv->mhi_netdev = mhi_netdev;
read_lock_bh(&mhi_netdev->pm_lock);
if (unlikely(!mhi_netdev->enabled)) {
diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c
index c524b0a..269ad77 100644
--- a/drivers/clk/qcom/gdsc-regulator.c
+++ b/drivers/clk/qcom/gdsc-regulator.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
@@ -20,6 +20,8 @@
#include <linux/mfd/syscon.h>
#include <linux/clk/qcom.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
+
/* GDSCR */
#define PWR_ON_MASK BIT(31)
#define CLK_DIS_WAIT_MASK (0xF << 12)
@@ -50,6 +52,7 @@
struct regmap *hw_ctrl;
struct regmap *sw_reset;
struct clk **clocks;
+ struct regulator *parent_regulator;
struct reset_control **reset_clocks;
bool toggle_mem;
bool toggle_periph;
@@ -120,7 +123,7 @@
* bit in the GDSCR to be set or reset after the GDSC state
* changes. Hence, keep on checking for a reasonable number
* of times until the bit is set with the least possible delay
- * between succeessive tries.
+ * between successive tries.
*/
udelay(1);
}
@@ -136,6 +139,30 @@
if (!sc->toggle_logic)
return !sc->resets_asserted;
+ if (sc->parent_regulator) {
+ /*
+ * The parent regulator for the GDSC is required to be on to
+ * make any register accesses to the GDSC base. Return false
+ * if the parent supply is disabled.
+ */
+ if (regulator_is_enabled(sc->parent_regulator) <= 0)
+ return false;
+
+ /*
+ * Place an explicit vote on the parent rail to cover cases when
+ * it might be disabled between this point and reading the GDSC
+ * registers.
+ */
+ if (regulator_set_voltage(sc->parent_regulator,
+ RPMH_REGULATOR_LEVEL_LOW_SVS, INT_MAX))
+ return false;
+
+ if (regulator_enable(sc->parent_regulator)) {
+ regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
+ return false;
+ }
+ }
+
regmap_read(sc->regmap, REG_OFFSET, ®val);
if (regval & PWR_ON_MASK) {
@@ -144,10 +171,20 @@
* votable GDS registers. Check the SW_COLLAPSE_MASK to
* determine if HLOS has voted for it.
*/
- if (!(regval & SW_COLLAPSE_MASK))
+ if (!(regval & SW_COLLAPSE_MASK)) {
+ if (sc->parent_regulator) {
+ regulator_disable(sc->parent_regulator);
+ regulator_set_voltage(sc->parent_regulator, 0,
+ INT_MAX);
+ }
return true;
+ }
}
+ if (sc->parent_regulator) {
+ regulator_disable(sc->parent_regulator);
+ regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
+ }
return false;
}
@@ -159,6 +196,15 @@
mutex_lock(&gdsc_seq_lock);
+ if (sc->parent_regulator) {
+ ret = regulator_set_voltage(sc->parent_regulator,
+ RPMH_REGULATOR_LEVEL_LOW_SVS, INT_MAX);
+ if (ret) {
+ mutex_unlock(&gdsc_seq_lock);
+ return ret;
+ }
+ }
+
if (sc->root_en || sc->force_root_en)
clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
@@ -166,8 +212,8 @@
if (regval & HW_CONTROL_MASK) {
dev_warn(&rdev->dev, "Invalid enable while %s is under HW control\n",
sc->rdesc.name);
- mutex_unlock(&gdsc_seq_lock);
- return -EBUSY;
+ ret = -EBUSY;
+ goto end;
}
if (sc->toggle_logic) {
@@ -250,9 +296,7 @@
dev_err(&rdev->dev, "%s final state (after additional %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x\n",
sc->rdesc.name, sc->gds_timeout,
regval, hw_ctrl_regval);
-
- mutex_unlock(&gdsc_seq_lock);
- return ret;
+ goto end;
}
} else {
dev_err(&rdev->dev, "%s enable timed out: 0x%x\n",
@@ -264,10 +308,7 @@
dev_err(&rdev->dev, "%s final state: 0x%x (%d us after timeout)\n",
sc->rdesc.name, regval,
sc->gds_timeout);
-
- mutex_unlock(&gdsc_seq_lock);
-
- return ret;
+ goto end;
}
}
} else {
@@ -300,6 +341,9 @@
clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
sc->is_gdsc_enabled = true;
+end:
+ if (sc->parent_regulator)
+ regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
mutex_unlock(&gdsc_seq_lock);
@@ -314,6 +358,15 @@
mutex_lock(&gdsc_seq_lock);
+ if (sc->parent_regulator) {
+ ret = regulator_set_voltage(sc->parent_regulator,
+ RPMH_REGULATOR_LEVEL_LOW_SVS, INT_MAX);
+ if (ret) {
+ mutex_unlock(&gdsc_seq_lock);
+ return ret;
+ }
+ }
+
if (sc->force_root_en)
clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
@@ -371,6 +424,9 @@
if ((sc->is_gdsc_enabled && sc->root_en) || sc->force_root_en)
clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
+ if (sc->parent_regulator)
+ regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
+
sc->is_gdsc_enabled = false;
mutex_unlock(&gdsc_seq_lock);
@@ -382,9 +438,33 @@
{
struct gdsc *sc = rdev_get_drvdata(rdev);
uint32_t regval;
+ int ret;
mutex_lock(&gdsc_seq_lock);
+
+ if (sc->parent_regulator) {
+ ret = regulator_set_voltage(sc->parent_regulator,
+ RPMH_REGULATOR_LEVEL_LOW_SVS, INT_MAX);
+ if (ret) {
+ mutex_unlock(&gdsc_seq_lock);
+ return ret;
+ }
+
+ ret = regulator_enable(sc->parent_regulator);
+ if (ret) {
+ regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
+ mutex_unlock(&gdsc_seq_lock);
+ return ret;
+ }
+ }
+
regmap_read(sc->regmap, REG_OFFSET, ®val);
+
+ if (sc->parent_regulator) {
+ regulator_disable(sc->parent_regulator);
+ regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
+ }
+
mutex_unlock(&gdsc_seq_lock);
if (regval & HW_CONTROL_MASK)
@@ -401,6 +481,22 @@
mutex_lock(&gdsc_seq_lock);
+ if (sc->parent_regulator) {
+ ret = regulator_set_voltage(sc->parent_regulator,
+ RPMH_REGULATOR_LEVEL_LOW_SVS, INT_MAX);
+ if (ret) {
+ mutex_unlock(&gdsc_seq_lock);
+ return ret;
+ }
+
+ ret = regulator_enable(sc->parent_regulator);
+ if (ret) {
+ regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
+ mutex_unlock(&gdsc_seq_lock);
+ return ret;
+ }
+ }
+
regmap_read(sc->regmap, REG_OFFSET, ®val);
switch (mode) {
@@ -444,6 +540,11 @@
break;
}
+ if (sc->parent_regulator) {
+ regulator_disable(sc->parent_regulator);
+ regulator_set_voltage(sc->parent_regulator, 0, INT_MAX);
+ }
+
mutex_unlock(&gdsc_seq_lock);
return ret;
@@ -560,6 +661,19 @@
sc->force_root_en = of_property_read_bool(pdev->dev.of_node,
"qcom,force-enable-root-clk");
+ if (of_find_property(pdev->dev.of_node, "vdd_parent-supply", NULL)) {
+ sc->parent_regulator = devm_regulator_get(&pdev->dev,
+ "vdd_parent");
+ if (IS_ERR(sc->parent_regulator)) {
+ ret = PTR_ERR(sc->parent_regulator);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to get vdd_parent regulator, err: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
for (i = 0; i < sc->clock_count; i++) {
const char *clock_name;
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 9411b4b..e6631d6 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -74,7 +74,6 @@
config CORESIGHT_SOURCE_ETM4X
bool "CoreSight Embedded Trace Macrocell 4.x driver"
- depends on ARM64
select CORESIGHT_LINKS_AND_SINKS
help
This driver provides support for the ETM4.x tracer module, tracing the
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index e0ffada..ed3f2c0 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -6,7 +6,8 @@
obj-$(CONFIG_OF) += of_coresight.o
obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o \
coresight-tmc-etf.o \
- coresight-tmc-etr.o
+ coresight-tmc-etr.o \
+ coresight-byte-cntr.o
obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.c b/drivers/hwtracing/coresight/coresight-byte-cntr.c
new file mode 100644
index 0000000..aaf6f40
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-byte-cntr.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight TMC ETR byte-counter driver
+ */
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/of_irq.h>
+#include <linux/moduleparam.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include "coresight-byte-cntr.h"
+#include "coresight-priv.h"
+#include "coresight-tmc.h"
+
+static struct tmc_drvdata *tmcdrvdata;
+
+static void tmc_etr_read_bytes(struct byte_cntr *byte_cntr_data, loff_t *ppos,
+ size_t bytes, size_t *len, char **bufp)
+{
+ struct etr_flat_buf *flat_buf = tmcdrvdata->etr_buf->private;
+
+ if (*bufp >= (char *)(flat_buf->vaddr + tmcdrvdata->size))
+ *bufp = flat_buf->vaddr;
+
+ if (*len >= bytes)
+ *len = bytes;
+ else if (((uint32_t)*ppos % bytes) + *len > bytes)
+ *len = bytes - ((uint32_t)*ppos % bytes);
+
+ if ((*bufp + *len) > (char *)(flat_buf->vaddr +
+ tmcdrvdata->size))
+ *len = (char *)(flat_buf->vaddr + tmcdrvdata->size) -
+ *bufp;
+ if (*len == bytes || (*len + (uint32_t)*ppos) % bytes == 0)
+ atomic_dec(&byte_cntr_data->irq_cnt);
+}
+
+
+static irqreturn_t etr_handler(int irq, void *data)
+{
+ struct byte_cntr *byte_cntr_data = data;
+
+ atomic_inc(&byte_cntr_data->irq_cnt);
+
+ wake_up(&byte_cntr_data->wq);
+
+ return IRQ_HANDLED;
+}
+
+static void tmc_etr_flush_bytes(loff_t *ppos, size_t bytes, size_t *len)
+{
+ uint32_t rwp = 0;
+ dma_addr_t paddr = tmcdrvdata->etr_buf->hwaddr;
+
+ rwp = readl_relaxed(tmcdrvdata->base + TMC_RWP);
+
+ if (rwp >= (paddr + *ppos)) {
+ if (bytes > (rwp - paddr - *ppos))
+ *len = rwp - paddr - *ppos;
+ }
+}
+
+static ssize_t tmc_etr_byte_cntr_read(struct file *fp, char __user *data,
+ size_t len, loff_t *ppos)
+{
+ struct byte_cntr *byte_cntr_data = fp->private_data;
+ char *bufp;
+
+ if (!data)
+ return -EINVAL;
+
+ mutex_lock(&byte_cntr_data->byte_cntr_lock);
+ if (!byte_cntr_data->read_active)
+ goto err0;
+
+ bufp = (char *)(tmcdrvdata->buf + *ppos);
+
+ if (byte_cntr_data->enable) {
+ if (!atomic_read(&byte_cntr_data->irq_cnt)) {
+ mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+ if (wait_event_interruptible(byte_cntr_data->wq,
+ atomic_read(&byte_cntr_data->irq_cnt) > 0))
+ return -ERESTARTSYS;
+ mutex_lock(&byte_cntr_data->byte_cntr_lock);
+ if (!byte_cntr_data->read_active)
+ goto err0;
+ }
+
+ tmc_etr_read_bytes(byte_cntr_data, ppos,
+ byte_cntr_data->block_size, &len, &bufp);
+
+ } else {
+ if (!atomic_read(&byte_cntr_data->irq_cnt)) {
+ tmc_etr_flush_bytes(ppos, byte_cntr_data->block_size,
+ &len);
+ if (!len)
+ goto err0;
+ } else {
+ tmc_etr_read_bytes(byte_cntr_data, ppos,
+ byte_cntr_data->block_size,
+ &len, &bufp);
+ }
+ }
+
+ if (copy_to_user(data, bufp, len)) {
+ mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+ dev_dbg(tmcdrvdata->dev, "%s: copy_to_user failed\n", __func__);
+ return -EFAULT;
+ }
+
+ if (*ppos + len >= tmcdrvdata->size)
+ *ppos = 0;
+ else
+ *ppos += len;
+err0:
+ mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+
+ return len;
+}
+
+void tmc_etr_byte_cntr_start(struct byte_cntr *byte_cntr_data)
+{
+ if (!byte_cntr_data)
+ return;
+
+ mutex_lock(&byte_cntr_data->byte_cntr_lock);
+
+ if (byte_cntr_data->block_size == 0) {
+ mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+ return;
+ }
+
+ atomic_set(&byte_cntr_data->irq_cnt, 0);
+ byte_cntr_data->enable = true;
+ mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+}
+EXPORT_SYMBOL(tmc_etr_byte_cntr_start);
+
+void tmc_etr_byte_cntr_stop(struct byte_cntr *byte_cntr_data)
+{
+ if (!byte_cntr_data)
+ return;
+
+ mutex_lock(&byte_cntr_data->byte_cntr_lock);
+ byte_cntr_data->enable = false;
+ coresight_csr_set_byte_cntr(byte_cntr_data->csr, 0);
+ mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+
+}
+EXPORT_SYMBOL(tmc_etr_byte_cntr_stop);
+
+
+static int tmc_etr_byte_cntr_release(struct inode *in, struct file *fp)
+{
+ struct byte_cntr *byte_cntr_data = fp->private_data;
+
+ mutex_lock(&byte_cntr_data->byte_cntr_lock);
+ byte_cntr_data->read_active = false;
+
+ coresight_csr_set_byte_cntr(byte_cntr_data->csr, 0);
+ mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+
+ return 0;
+}
+
+static int tmc_etr_byte_cntr_open(struct inode *in, struct file *fp)
+{
+ struct byte_cntr *byte_cntr_data =
+ container_of(in->i_cdev, struct byte_cntr, dev);
+
+ mutex_lock(&byte_cntr_data->byte_cntr_lock);
+
+ if (!tmcdrvdata->enable || !byte_cntr_data->block_size) {
+ mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+ return -EINVAL;
+ }
+
+	/* The hardware counts in 8-byte units; program the counter so
+	 * that an interrupt is raised after every 'block_size' bytes.
+	 */
+ coresight_csr_set_byte_cntr(byte_cntr_data->csr,
+ (byte_cntr_data->block_size) / 8);
+
+ fp->private_data = byte_cntr_data;
+ nonseekable_open(in, fp);
+ byte_cntr_data->enable = true;
+ byte_cntr_data->read_active = true;
+ mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+
+ return 0;
+}
+
+static const struct file_operations byte_cntr_fops = {
+ .owner = THIS_MODULE,
+ .open = tmc_etr_byte_cntr_open,
+ .read = tmc_etr_byte_cntr_read,
+ .release = tmc_etr_byte_cntr_release,
+ .llseek = no_llseek,
+};
+
+static int byte_cntr_register_chardev(struct byte_cntr *byte_cntr_data)
+{
+ int ret;
+ unsigned int baseminor = 0;
+ unsigned int count = 1;
+ struct device *device;
+ dev_t dev;
+
+ ret = alloc_chrdev_region(&dev, baseminor, count, "byte-cntr");
+ if (ret < 0) {
+ pr_err("alloc_chrdev_region failed %d\n", ret);
+ return ret;
+ }
+ cdev_init(&byte_cntr_data->dev, &byte_cntr_fops);
+
+ byte_cntr_data->dev.owner = THIS_MODULE;
+ byte_cntr_data->dev.ops = &byte_cntr_fops;
+
+ ret = cdev_add(&byte_cntr_data->dev, dev, 1);
+ if (ret)
+ goto exit_unreg_chrdev_region;
+
+ byte_cntr_data->driver_class = class_create(THIS_MODULE,
+ "coresight-tmc-etr-stream");
+ if (IS_ERR(byte_cntr_data->driver_class)) {
+ ret = -ENOMEM;
+ pr_err("class_create failed %d\n", ret);
+ goto exit_unreg_chrdev_region;
+ }
+
+ device = device_create(byte_cntr_data->driver_class, NULL,
+ byte_cntr_data->dev.dev, byte_cntr_data,
+ "byte-cntr");
+
+ if (IS_ERR(device)) {
+ pr_err("class_device_create failed %d\n", ret);
+ ret = -ENOMEM;
+ goto exit_destroy_class;
+ }
+
+ return 0;
+
+exit_destroy_class:
+ class_destroy(byte_cntr_data->driver_class);
+exit_unreg_chrdev_region:
+ unregister_chrdev_region(byte_cntr_data->dev.dev, 1);
+ return ret;
+}
+
+struct byte_cntr *byte_cntr_init(struct amba_device *adev,
+ struct tmc_drvdata *drvdata)
+{
+ struct device *dev = &adev->dev;
+ struct device_node *np = adev->dev.of_node;
+ int byte_cntr_irq;
+ int ret;
+ struct byte_cntr *byte_cntr_data;
+
+ byte_cntr_irq = of_irq_get_byname(np, "byte-cntr-irq");
+ if (byte_cntr_irq < 0)
+ return NULL;
+
+ byte_cntr_data = devm_kzalloc(dev, sizeof(*byte_cntr_data), GFP_KERNEL);
+ if (!byte_cntr_data)
+ return NULL;
+
+ ret = devm_request_irq(dev, byte_cntr_irq, etr_handler,
+ IRQF_TRIGGER_RISING | IRQF_SHARED,
+ "tmc-etr", byte_cntr_data);
+ if (ret) {
+ devm_kfree(dev, byte_cntr_data);
+ dev_err(dev, "Byte_cntr interrupt registration failed\n");
+ return NULL;
+ }
+
+ ret = byte_cntr_register_chardev(byte_cntr_data);
+ if (ret) {
+ devm_free_irq(dev, byte_cntr_irq, byte_cntr_data);
+ devm_kfree(dev, byte_cntr_data);
+ dev_err(dev, "Byte_cntr char dev registration failed\n");
+ return NULL;
+ }
+
+ tmcdrvdata = drvdata;
+ byte_cntr_data->byte_cntr_irq = byte_cntr_irq;
+ byte_cntr_data->csr = drvdata->csr;
+ atomic_set(&byte_cntr_data->irq_cnt, 0);
+ init_waitqueue_head(&byte_cntr_data->wq);
+ mutex_init(&byte_cntr_data->byte_cntr_lock);
+
+ return byte_cntr_data;
+}
+EXPORT_SYMBOL(byte_cntr_init);
diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.h b/drivers/hwtracing/coresight/coresight-byte-cntr.h
new file mode 100644
index 0000000..bbfdb2c
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-byte-cntr.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+#ifndef _CORESIGHT_BYTE_CNTR_H
+#define _CORESIGHT_BYTE_CNTR_H
+#include <linux/cdev.h>
+#include <linux/amba/bus.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+
+struct byte_cntr {
+ struct cdev dev;
+ struct class *driver_class;
+ bool enable;
+ bool read_active;
+ uint32_t byte_cntr_value;
+ uint32_t block_size;
+ int byte_cntr_irq;
+ atomic_t irq_cnt;
+ wait_queue_head_t wq;
+ struct mutex byte_cntr_lock;
+ struct coresight_csr *csr;
+};
+
+extern void tmc_etr_byte_cntr_start(struct byte_cntr *byte_cntr_data);
+extern void tmc_etr_byte_cntr_stop(struct byte_cntr *byte_cntr_data);
+
+#endif
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 2f192a1..e9d3006 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -977,7 +977,11 @@
spin_lock_init(&drvdata->spinlock);
- drvdata->cpu = pdata ? pdata->cpu : 0;
+ drvdata->cpu = pdata ? pdata->cpu : -ENODEV;
+ if (drvdata->cpu == -ENODEV) {
+ dev_info(dev, "CPU not available\n");
+ return -ENODEV;
+ }
cpus_read_lock();
ret = smp_call_function_single(drvdata->cpu,
diff --git a/drivers/hwtracing/coresight/coresight-ost.c b/drivers/hwtracing/coresight/coresight-ost.c
index bfcdade..e641c43 100644
--- a/drivers/hwtracing/coresight/coresight-ost.c
+++ b/drivers/hwtracing/coresight/coresight-ost.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/device.h>
@@ -169,6 +169,9 @@
uint32_t ch;
void __iomem *ch_addr;
+ if (!(drvdata && drvdata->master_enable))
+ return 0;
+
/* allocate channel and get the channel address */
ch = stm_channel_alloc();
if (unlikely(ch >= drvdata->numsp)) {
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 8b333cd..8954836 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -72,6 +72,9 @@
/* Reserve the first 10 channels for kernel usage */
#define STM_CHANNEL_OFFSET 0
+#define APPS_NIDEN_SHIFT 17
+#define APPS_DBGEN_SHIFT 16
+
static int boot_nr_channel;
/*
@@ -357,6 +360,9 @@
if (!(drvdata && local_read(&drvdata->mode)))
return -EACCES;
+ if (!drvdata->master_enable)
+ return -EPERM;
+
if (channel >= drvdata->numsp)
return -EINVAL;
@@ -772,6 +778,17 @@
drvdata->stm.set_options = stm_generic_set_options;
}
+static bool is_apps_debug_disabled(struct stm_drvdata *drvdata)
+{
+ u32 val;
+
+ val = readl_relaxed(drvdata->debug_status_chs.base);
+
+ val &= BIT(APPS_NIDEN_SHIFT);
+
+ return val == 0;
+}
+
static int stm_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
@@ -782,6 +799,7 @@
struct stm_drvdata *drvdata;
struct resource *res = &adev->res;
struct resource ch_res;
+ struct resource debug_ch_res;
size_t res_size, bitmap_size;
struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
@@ -820,6 +838,22 @@
return PTR_ERR(base);
drvdata->chs.base = base;
+ ret = stm_get_resource_byname(np, "stm-debug-status", &debug_ch_res);
+ /*
+ * By default, master enable is true, means not controlled
+ * by debug status register
+ */
+ if (!ret) {
+ drvdata->debug_status_chs.phys = debug_ch_res.start;
+ base = devm_ioremap_resource(dev, &debug_ch_res);
+ if (!IS_ERR(base)) {
+ drvdata->debug_status_chs.base = base;
+ drvdata->master_enable =
+ !is_apps_debug_disabled(drvdata);
+ }
+ } else
+ drvdata->master_enable = true;
+
drvdata->write_bytes = stm_fundamental_data_size(drvdata);
if (boot_nr_channel) {
@@ -868,7 +902,8 @@
pm_runtime_put(&adev->dev);
- dev_info(dev, "%s initialized\n", (char *)id->data);
+ dev_info(dev, "%s initialized with master %s\n", (char *)id->data,
+ drvdata->master_enable ? "Enabled" : "Disabled");
return 0;
stm_unregister:
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 0549249..34c4c74 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -179,6 +179,12 @@
if (!used)
kfree(buf);
+ if (!ret) {
+ coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+ coresight_cti_map_trigout(drvdata->cti_flush, 1, 0);
+ dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
+ }
+
return ret;
}
@@ -256,6 +262,9 @@
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+ coresight_cti_unmap_trigout(drvdata->cti_flush, 1, 0);
+
dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
}
@@ -542,11 +551,13 @@
goto out;
}
- /* There is no point in reading a TMC in HW FIFO mode */
- mode = readl_relaxed(drvdata->base + TMC_MODE);
- if (mode != TMC_MODE_CIRCULAR_BUFFER) {
- ret = -EINVAL;
- goto out;
+ if (drvdata->enable) {
+ /* There is no point in reading a TMC in HW FIFO mode */
+ mode = readl_relaxed(drvdata->base + TMC_MODE);
+ if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+ ret = -EINVAL;
+ goto out;
+ }
}
/* Don't interfere if operated from Perf */
@@ -585,11 +596,13 @@
spin_lock_irqsave(&drvdata->spinlock, flags);
- /* There is no point in reading a TMC in HW FIFO mode */
- mode = readl_relaxed(drvdata->base + TMC_MODE);
- if (mode != TMC_MODE_CIRCULAR_BUFFER) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- return -EINVAL;
+ if (drvdata->enable) {
+ /* There is no point in reading a TMC in HW FIFO mode */
+ mode = readl_relaxed(drvdata->base + TMC_MODE);
+ if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return -EINVAL;
+ }
}
/* Re-enable the TMC if need be */
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 2eda5de..d623cb6 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -13,13 +13,6 @@
#include "coresight-priv.h"
#include "coresight-tmc.h"
-struct etr_flat_buf {
- struct device *dev;
- dma_addr_t daddr;
- void *vaddr;
- size_t size;
-};
-
/*
* The TMC ETR SG has a page size of 4K. The SG table contains pointers
* to 4KB buffers. However, the OS may use a PAGE_SIZE different from
@@ -895,7 +888,7 @@
tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
}
-static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
+void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
u32 axictl, sts;
struct etr_buf *etr_buf = drvdata->etr_buf;
@@ -930,6 +923,10 @@
axictl |= TMC_AXICTL_SCT_GAT_MODE;
}
+ axictl = (axictl &
+ ~(TMC_AXICTL_CACHE_CTL_B0 | TMC_AXICTL_CACHE_CTL_B1 |
+ TMC_AXICTL_CACHE_CTL_B2 | TMC_AXICTL_CACHE_CTL_B3)) |
+ TMC_AXICTL_CACHE_CTL_B0;
writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
tmc_write_dba(drvdata, etr_buf->hwaddr);
/*
@@ -944,6 +941,10 @@
writel_relaxed(sts, drvdata->base + TMC_STS);
}
+ writel_relaxed(etr_buf->hwaddr, drvdata->base + TMC_DBALO);
+ writel_relaxed(((u64)etr_buf->hwaddr >> 32) & 0xFF,
+ drvdata->base + TMC_DBAHI);
+
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
TMC_FFCR_TRIGON_TRIGIN,
@@ -999,7 +1000,7 @@
tmc_sync_etr_buf(drvdata);
}
-static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
@@ -1034,19 +1035,26 @@
* buffer, provided the size matches. Any allocation has to be done
* with the lock released.
*/
+ mutex_lock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!drvdata->etr_buf || (drvdata->etr_buf->size != drvdata->size)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Allocate memory with the locks released */
free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
- if (IS_ERR(new_buf))
+ if (IS_ERR(new_buf)) {
+ mutex_unlock(&drvdata->mem_lock);
return PTR_ERR(new_buf);
-
+ }
/* Let's try again */
spin_lock_irqsave(&drvdata->spinlock, flags);
}
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
+ coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
+ coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+ }
+
if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
ret = -EBUSY;
goto out;
@@ -1071,7 +1079,11 @@
}
drvdata->mode = CS_MODE_SYSFS;
- tmc_etr_enable_hw(drvdata);
+
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
+ tmc_etr_enable_hw(drvdata);
+
+ drvdata->enable = true;
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -1079,6 +1091,10 @@
if (free_buf)
tmc_etr_free_sysfs_buf(free_buf);
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
+ tmc_etr_byte_cntr_start(drvdata->byte_cntr);
+
+ mutex_unlock(&drvdata->mem_lock);
if (!ret)
dev_info(drvdata->dev, "TMC-ETR enabled\n");
@@ -1109,20 +1125,30 @@
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ mutex_lock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
return;
}
/* Disable the TMC only if it needs to */
if (drvdata->mode != CS_MODE_DISABLED) {
- tmc_etr_disable_hw(drvdata);
+ if (drvdata->out_mode != TMC_ETR_OUT_MODE_USB)
+ tmc_etr_disable_hw(drvdata);
+
drvdata->mode = CS_MODE_DISABLED;
}
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
+ tmc_etr_byte_cntr_stop(drvdata->byte_cntr);
+ coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+ coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+ }
+ mutex_unlock(&drvdata->mem_lock);
dev_info(drvdata->dev, "TMC-ETR disabled\n");
}
@@ -1144,12 +1170,17 @@
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
return -EINVAL;
+ mutex_lock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EBUSY;
goto out;
}
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
+ ret = -EINVAL;
+ goto out;
+ }
/* Don't interfere if operated from Perf */
if (drvdata->mode == CS_MODE_PERF) {
ret = -EINVAL;
@@ -1162,6 +1193,11 @@
goto out;
}
+ if (drvdata->byte_cntr && drvdata->byte_cntr->enable) {
+ ret = -EINVAL;
+ goto out;
+ }
+
/* Disable the TMC if need be */
if (drvdata->mode == CS_MODE_SYSFS)
tmc_etr_disable_hw(drvdata);
@@ -1169,6 +1205,7 @@
drvdata->reading = true;
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
return ret;
}
@@ -1181,7 +1218,7 @@
/* config types are set a boot time and never change */
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
return -EINVAL;
-
+ mutex_lock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
/* RE-enable the TMC if need be */
@@ -1208,5 +1245,7 @@
if (etr_buf)
tmc_free_etr_buf(etr_buf);
+
+ mutex_unlock(&drvdata->mem_lock);
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index aff579c..d22014d 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2012,2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012,2017-2018, The Linux Foundation. All rights reserved.
*
* Description: CoreSight Trace Memory Controller driver
*/
@@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
+#include <asm/dma-iommu.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
@@ -56,11 +57,13 @@
void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
+ drvdata->enable = true;
writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
}
void tmc_disable_hw(struct tmc_drvdata *drvdata)
{
+ drvdata->enable = false;
writel_relaxed(0x0, drvdata->base + TMC_CTL);
}
@@ -145,18 +148,23 @@
ssize_t actual;
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
+ mutex_lock(&drvdata->mem_lock);
actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
- if (actual <= 0)
+ if (actual <= 0) {
+ mutex_unlock(&drvdata->mem_lock);
return 0;
+ }
if (copy_to_user(data, bufp, actual)) {
dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
+ mutex_unlock(&drvdata->mem_lock);
return -EFAULT;
}
*ppos += actual;
dev_dbg(drvdata->dev, "%zu bytes copied\n", actual);
+ mutex_unlock(&drvdata->mem_lock);
return actual;
}
@@ -293,30 +301,225 @@
unsigned long val;
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ if (drvdata->enable) {
+ pr_err("ETR is in use, disable it to change the mem_size\n");
+ return -EINVAL;
+ }
/* Only permitted for TMC-ETRs */
if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
return -EPERM;
-
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
/* The buffer size should be page aligned */
if (val & (PAGE_SIZE - 1))
return -EINVAL;
+
drvdata->size = val;
return size;
}
static DEVICE_ATTR_RW(buffer_size);
-static struct attribute *coresight_tmc_attrs[] = {
+static ssize_t out_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ str_tmc_etr_out_mode[drvdata->out_mode]);
+}
+
+static ssize_t out_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ char str[10] = "";
+ unsigned long flags;
+ int ret;
+
+ if (strlen(buf) >= 10)
+ return -EINVAL;
+ if (sscanf(buf, "%10s", str) != 1)
+ return -EINVAL;
+
+ mutex_lock(&drvdata->mem_lock);
+ if (!strcmp(str, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_MEM])) {
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
+ goto out;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (!drvdata->enable) {
+ drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ goto out;
+ }
+
+ tmc_etr_enable_hw(drvdata);
+ drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
+ coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+
+ } else if (!strcmp(str, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_USB])) {
+ if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
+ goto out;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (!drvdata->enable) {
+ drvdata->out_mode = TMC_ETR_OUT_MODE_USB;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ goto out;
+ }
+ if (drvdata->reading) {
+ ret = -EBUSY;
+ goto err1;
+ }
+ tmc_etr_disable_hw(drvdata);
+ drvdata->out_mode = TMC_ETR_OUT_MODE_USB;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+ coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+
+ }
+out:
+ mutex_unlock(&drvdata->mem_lock);
+ return size;
+err1:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
+ return ret;
+}
+static DEVICE_ATTR_RW(out_mode);
+
+static ssize_t available_out_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(str_tmc_etr_out_mode); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%s ",
+ str_tmc_etr_out_mode[i]);
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+ return len;
+}
+static DEVICE_ATTR_RO(available_out_modes);
+
+static ssize_t block_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ uint32_t val = 0;
+
+ if (drvdata->byte_cntr)
+ val = drvdata->byte_cntr->block_size;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ val);
+}
+
+static ssize_t block_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (!drvdata->byte_cntr)
+ return -EINVAL;
+
+ mutex_lock(&drvdata->byte_cntr->byte_cntr_lock);
+ drvdata->byte_cntr->block_size = val * 8;
+ mutex_unlock(&drvdata->byte_cntr->byte_cntr_lock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(block_size);
+
+static int tmc_iommu_init(struct tmc_drvdata *drvdata)
+{
+ struct device_node *node = drvdata->dev->of_node;
+ int s1_bypass;
+ int ret = 0;
+
+ if (!of_property_read_bool(node, "iommus"))
+ return 0;
+
+ drvdata->iommu_mapping = arm_iommu_create_mapping(&amba_bustype,
+ 0, (SZ_1G * 4ULL));
+ if (IS_ERR_OR_NULL(drvdata->iommu_mapping)) {
+ dev_err(drvdata->dev, "Create mapping failed, err = %d\n", ret);
+ ret = PTR_ERR(drvdata->iommu_mapping);
+ goto iommu_map_err;
+ }
+
+ s1_bypass = of_property_read_bool(node, "qcom,smmu-s1-bypass");
+ ret = iommu_domain_set_attr(drvdata->iommu_mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
+ if (ret) {
+ dev_err(drvdata->dev, "IOMMU set s1 bypass (%d) failed (%d)\n",
+ s1_bypass, ret);
+ goto iommu_attach_fail;
+ }
+
+ ret = arm_iommu_attach_device(drvdata->dev, drvdata->iommu_mapping);
+ if (ret) {
+ dev_err(drvdata->dev, "Attach device failed, err = %d\n", ret);
+ goto iommu_attach_fail;
+ }
+
+ return ret;
+
+iommu_attach_fail:
+ arm_iommu_release_mapping(drvdata->iommu_mapping);
+iommu_map_err:
+ drvdata->iommu_mapping = NULL;
+ return ret;
+}
+
+static void tmc_iommu_deinit(struct tmc_drvdata *drvdata)
+{
+ if (!drvdata->iommu_mapping)
+ return;
+
+ arm_iommu_detach_device(drvdata->dev);
+ arm_iommu_release_mapping(drvdata->iommu_mapping);
+
+ drvdata->iommu_mapping = NULL;
+}
+
+static struct attribute *coresight_tmc_etf_attrs[] = {
&dev_attr_trigger_cntr.attr,
&dev_attr_buffer_size.attr,
NULL,
};
-static const struct attribute_group coresight_tmc_group = {
- .attrs = coresight_tmc_attrs,
+static struct attribute *coresight_tmc_etr_attrs[] = {
+ &dev_attr_trigger_cntr.attr,
+ &dev_attr_buffer_size.attr,
+ &dev_attr_out_mode.attr,
+ &dev_attr_available_out_modes.attr,
+ &dev_attr_block_size.attr,
+ NULL,
+};
+
+static const struct attribute_group coresight_tmc_etf_group = {
+ .attrs = coresight_tmc_etf_attrs,
+};
+
+static const struct attribute_group coresight_tmc_etr_group = {
+ .attrs = coresight_tmc_etr_attrs,
};
static const struct attribute_group coresight_tmc_mgmt_group = {
@@ -324,8 +527,14 @@
.name = "mgmt",
};
-const struct attribute_group *coresight_tmc_groups[] = {
- &coresight_tmc_group,
+const struct attribute_group *coresight_tmc_etf_groups[] = {
+ &coresight_tmc_etf_group,
+ &coresight_tmc_mgmt_group,
+ NULL,
+};
+
+const struct attribute_group *coresight_tmc_etr_groups[] = {
+ &coresight_tmc_etr_group,
&coresight_tmc_mgmt_group,
NULL,
};
@@ -381,16 +590,19 @@
case TMC_CONFIG_TYPE_ETB:
desc->type = CORESIGHT_DEV_TYPE_SINK;
desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+ desc->groups = coresight_tmc_etf_groups;
desc->ops = &tmc_etb_cs_ops;
break;
case TMC_CONFIG_TYPE_ETR:
desc->type = CORESIGHT_DEV_TYPE_SINK;
desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+ desc->groups = coresight_tmc_etr_groups;
desc->ops = &tmc_etr_cs_ops;
break;
case TMC_CONFIG_TYPE_ETF:
desc->type = CORESIGHT_DEV_TYPE_LINKSINK;
desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
+ desc->groups = coresight_tmc_etf_groups;
desc->ops = &tmc_etf_cs_ops;
break;
default:
@@ -411,15 +623,14 @@
struct resource *res = &adev->res;
struct coresight_desc desc = { 0 };
struct device_node *np = adev->dev.of_node;
+ struct coresight_cti_data *ctidata;
- if (np) {
- pdata = of_get_coresight_platform_data(dev, np);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- goto out;
- }
- adev->dev.platform_data = pdata;
+ pdata = of_get_coresight_platform_data(dev, np);
+ if (IS_ERR(pdata)) {
+ ret = PTR_ERR(pdata);
+ goto out;
}
+ adev->dev.platform_data = pdata;
ret = -ENOMEM;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
@@ -439,16 +650,16 @@
drvdata->base = base;
spin_lock_init(&drvdata->spinlock);
+ mutex_init(&drvdata->mem_lock);
devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
drvdata->config_type = BMVAL(devid, 6, 7);
drvdata->memwidth = tmc_get_memwidth(devid);
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- if (np)
- ret = of_property_read_u32(np,
- "arm,buffer-size",
- &drvdata->size);
+ drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
+ ret = of_property_read_u32(np, "arm,buffer-size",
+ &drvdata->size);
if (ret)
drvdata->size = SZ_1M;
} else {
@@ -468,9 +679,21 @@
}
}
+ ctidata = of_get_coresight_cti_data(dev, adev->dev.of_node);
+ if (IS_ERR(ctidata)) {
+ dev_err(dev, "invalid cti data\n");
+ } else if (ctidata && ctidata->nr_ctis == 2) {
+ drvdata->cti_flush = coresight_cti_get(ctidata->names[0]);
+ if (IS_ERR(drvdata->cti_flush))
+ dev_err(dev, "failed to get flush cti\n");
+
+ drvdata->cti_reset = coresight_cti_get(ctidata->names[1]);
+ if (IS_ERR(drvdata->cti_reset))
+ dev_err(dev, "failed to get reset cti\n");
+ }
+
desc.pdata = pdata;
desc.dev = dev;
- desc.groups = coresight_tmc_groups;
ret = tmc_config_desc(drvdata, &desc);
if (ret)
goto out;
@@ -479,20 +702,34 @@
ret = tmc_etr_setup_caps(drvdata, devid, id->data);
if (ret)
goto out;
+
+ drvdata->byte_cntr = byte_cntr_init(adev, drvdata);
+ }
+
+ ret = tmc_iommu_init(drvdata);
+ if (ret) {
+ dev_err(dev, "TMC SMMU init failed, err =%d\n", ret);
+ goto out;
}
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
- goto out;
+ goto out_iommu_deinit;
}
drvdata->miscdev.name = pdata->name;
drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
drvdata->miscdev.fops = &tmc_fops;
ret = misc_register(&drvdata->miscdev);
- if (ret)
+ if (ret) {
coresight_unregister(drvdata->csdev);
+ goto out_iommu_deinit;
+ }
+ return ret;
+
+out_iommu_deinit:
+ tmc_iommu_deinit(drvdata);
out:
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 2a3ab2f..076dda3 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -9,6 +9,13 @@
#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
+#include <linux/delay.h>
+#include <asm/cacheflush.h>
+#include <linux/of_address.h>
+#include <linux/amba/bus.h>
+#include <linux/coresight-cti.h>
+
+#include "coresight-byte-cntr.h"
#define TMC_RSZ 0x004
#define TMC_STS 0x00c
@@ -64,6 +71,10 @@
#define TMC_AXICTL_PROT_CTL_B0 BIT(0)
#define TMC_AXICTL_PROT_CTL_B1 BIT(1)
+#define TMC_AXICTL_CACHE_CTL_B0 BIT(2)
+#define TMC_AXICTL_CACHE_CTL_B1 BIT(3)
+#define TMC_AXICTL_CACHE_CTL_B2 BIT(4)
+#define TMC_AXICTL_CACHE_CTL_B3 BIT(5)
#define TMC_AXICTL_SCT_GAT_MODE BIT(7)
#define TMC_AXICTL_WR_BURST_16 0xF00
/* Write-back Read and Write-allocate */
@@ -129,8 +140,27 @@
ETR_MODE_CATU, /* Use SG mechanism in CATU */
};
+enum tmc_etr_out_mode {
+ TMC_ETR_OUT_MODE_NONE,
+ TMC_ETR_OUT_MODE_MEM,
+ TMC_ETR_OUT_MODE_USB,
+};
+
+static const char * const str_tmc_etr_out_mode[] = {
+ [TMC_ETR_OUT_MODE_NONE] = "none",
+ [TMC_ETR_OUT_MODE_MEM] = "mem",
+ [TMC_ETR_OUT_MODE_USB] = "usb",
+};
+
struct etr_buf_operations;
+struct etr_flat_buf {
+ struct device *dev;
+ dma_addr_t daddr;
+ void *vaddr;
+ size_t size;
+};
+
/**
* struct etr_buf - Details of the buffer used by ETR
* @mode : Mode of the ETR buffer, contiguous, Scatter Gather etc.
@@ -187,10 +217,17 @@
u32 mode;
enum tmc_config_type config_type;
enum tmc_mem_intf_width memwidth;
+ struct mutex mem_lock;
u32 trigger_cntr;
u32 etr_caps;
struct coresight_csr *csr;
const char *csr_name;
+ bool enable;
+ struct coresight_cti *cti_flush;
+ struct coresight_cti *cti_reset;
+ enum tmc_etr_out_mode out_mode;
+ struct byte_cntr *byte_cntr;
+ struct dma_iommu_mapping *iommu_mapping;
};
struct etr_buf_operations {
@@ -251,6 +288,10 @@
/* ETR functions */
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
+void tmc_etr_enable_hw(struct tmc_drvdata *drvdata);
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata);
+extern struct byte_cntr *byte_cntr_init(struct amba_device *adev,
+ struct tmc_drvdata *drvdata);
extern const struct coresight_ops tmc_etr_cs_ops;
ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
loff_t pos, size_t len, char **bufpp);
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index b108583..e8ac7e7 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2012, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
@@ -150,6 +150,7 @@
if (atomic_dec_return(csdev->refcnt) == 0) {
if (sink_ops(csdev)->disable) {
sink_ops(csdev)->disable(csdev);
+ csdev->enable = false;
csdev->activated = false;
}
}
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index a329695..30f036d 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -16,7 +16,7 @@
#include <linux/coresight.h>
#include <linux/cpumask.h>
#include <asm/smp_plat.h>
-
+#include <linux/coresight-cti.h>
static int of_dev_node_match(struct device *dev, void *data)
{
@@ -103,14 +103,16 @@
struct device_node *dn;
dn = of_parse_phandle(node, "cpu", 0);
- /* Affinity defaults to CPU0 */
+
+ /* Affinity defaults to invalid */
if (!dn)
- return 0;
+ return -ENODEV;
+
cpu = of_cpu_node_to_id(dn);
of_node_put(dn);
- /* Affinity to CPU0 if no cpu nodes are found */
- return (cpu < 0) ? 0 : cpu;
+ /* Affinity to invalid if no cpu nodes are found */
+ return (cpu < 0) ? -ENODEV : cpu;
}
EXPORT_SYMBOL_GPL(of_coresight_get_cpu);
@@ -130,9 +132,11 @@
if (!pdata)
return ERR_PTR(-ENOMEM);
- /* Use device name as sysfs handle */
- pdata->name = dev_name(dev);
-
+ ret = of_property_read_string(node, "coresight-name", &pdata->name);
+ if (ret) {
+ /* Use device name as sysfs handle */
+ pdata->name = dev_name(dev);
+ }
/* Get the number of input and output port for this component */
of_coresight_get_ports(node, &pdata->nr_inport, &pdata->nr_outport);
@@ -181,7 +185,10 @@
if (!rdev)
return ERR_PTR(-EPROBE_DEFER);
- pdata->child_names[i] = dev_name(rdev);
+ ret = of_property_read_string(rparent, "coresight-name",
+ &pdata->child_names[i]);
+ if (ret)
+ pdata->child_names[i] = dev_name(rdev);
pdata->child_ports[i] = rendpoint.id;
i++;
@@ -211,3 +218,45 @@
return 0;
}
EXPORT_SYMBOL(of_get_coresight_csr_name);
+
+struct coresight_cti_data *of_get_coresight_cti_data(
+ struct device *dev, struct device_node *node)
+{
+ int i, ret;
+ uint32_t ctis_len;
+ struct device_node *child_node;
+ struct coresight_cti_data *ctidata;
+
+ ctidata = devm_kzalloc(dev, sizeof(*ctidata), GFP_KERNEL);
+ if (!ctidata)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_get_property(node, "coresight-ctis", &ctis_len))
+ ctidata->nr_ctis = ctis_len/sizeof(uint32_t);
+ else
+ return ERR_PTR(-EINVAL);
+
+ if (ctidata->nr_ctis) {
+ ctidata->names = devm_kzalloc(dev, ctidata->nr_ctis *
+ sizeof(*ctidata->names),
+ GFP_KERNEL);
+ if (!ctidata->names)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ctidata->nr_ctis; i++) {
+ child_node = of_parse_phandle(node, "coresight-ctis",
+ i);
+ if (!child_node)
+ return ERR_PTR(-EINVAL);
+
+ ret = of_property_read_string(child_node,
+ "coresight-name",
+ &ctidata->names[i]);
+ of_node_put(child_node);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ }
+ return ctidata;
+}
+EXPORT_SYMBOL(of_get_coresight_cti_data);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 4756c80..7482de5 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -60,7 +60,7 @@
config IOMMU_IO_PGTABLE_FAST
bool "Fast ARMv7/v8 Long Descriptor Format"
- select IOMMU_IO_PGTABLE
+ depends on ARM64_DMA_USE_IOMMU || ARM_DMA_USE_IOMMU
help
Enable support for a subset of the ARM long descriptor pagetable
format. This allocator achieves fast performance by
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 51bf2aa..89991d2 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -9,6 +9,7 @@
obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
+obj-$(CONFIG_MSM_TZ_SMMU) += io-pgtable-msm-secure.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_FAST) += io-pgtable-fast.o dma-mapping-fast.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_IOMMU_DEBUG) += iommu-debug.o
diff --git a/drivers/iommu/arm-smmu-errata.c b/drivers/iommu/arm-smmu-errata.c
index a5367ae..c3575f8 100644
--- a/drivers/iommu/arm-smmu-errata.c
+++ b/drivers/iommu/arm-smmu-errata.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
@@ -32,7 +32,7 @@
ret = hyp_assign_phys(page_to_phys(page), PAGE_ALIGN(size),
&source_vm, 1,
&dest_vm, &dest_perm, 1);
- if (ret) {
+ if (ret && (ret != -EIO)) {
__free_pages(page, get_order(size));
page = NULL;
}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 9680912..82f9bc5 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -53,6 +53,7 @@
#include <soc/qcom/secure_buffer.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
+#include <trace/events/iommu.h>
#include <dt-bindings/msm/msm-bus-ids.h>
#include <linux/amba/bus.h>
@@ -247,6 +248,7 @@
#define ARM_SMMU_OPT_NO_ASID_RETENTION (1 << 5)
#define ARM_SMMU_OPT_STATIC_CB (1 << 6)
#define ARM_SMMU_OPT_DISABLE_ATOS (1 << 7)
+#define ARM_SMMU_OPT_MIN_IOVA_ALIGN (1 << 8)
u32 options;
enum arm_smmu_arch_version version;
enum arm_smmu_implementation model;
@@ -362,6 +364,8 @@
struct list_head unassign_list;
struct mutex assign_lock;
struct list_head secure_pool_list;
+ /* nonsecure pool protected by pgtbl_lock */
+ struct list_head nonsecure_pool;
struct iommu_domain domain;
bool qsmmuv500_errata1_min_iova_align;
};
@@ -384,6 +388,7 @@
{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
{ ARM_SMMU_OPT_STATIC_CB, "qcom,enable-static-cb"},
{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
+ { ARM_SMMU_OPT_MIN_IOVA_ALIGN, "qcom,min-iova-align" },
{ 0, NULL},
};
@@ -961,7 +966,7 @@
}
/* Wait for any pending TLB invalidations to complete */
-static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
+static int __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
void __iomem *sync, void __iomem *status)
{
unsigned int spin_cnt, delay;
@@ -970,13 +975,15 @@
for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
- return;
+ return 0;
cpu_relax();
}
udelay(delay);
}
+ trace_tlbsync_timeout(smmu->dev, 0);
dev_err_ratelimited(smmu->dev,
"TLB sync timed out -- SMMU may be deadlocked\n");
+ return -EINVAL;
}
static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
@@ -985,8 +992,10 @@
unsigned long flags;
spin_lock_irqsave(&smmu->global_sync_lock, flags);
- __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
- base + ARM_SMMU_GR0_sTLBGSTATUS);
+ if (__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
+ base + ARM_SMMU_GR0_sTLBGSTATUS))
+ dev_err_ratelimited(smmu->dev,
+ "TLB global sync failed!\n");
spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}
@@ -998,8 +1007,12 @@
unsigned long flags;
spin_lock_irqsave(&smmu_domain->sync_lock, flags);
- __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
- base + ARM_SMMU_CB_TLBSTATUS);
+ if (__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
+ base + ARM_SMMU_CB_TLBSTATUS))
+ dev_err_ratelimited(smmu->dev,
+ "TLB sync on cb%d failed for device %s\n",
+ smmu_domain->cfg.cbndx,
+ dev_name(smmu_domain->dev));
spin_unlock_irqrestore(&smmu_domain->sync_lock, flags);
}
@@ -1013,10 +1026,14 @@
static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
+ struct device *dev = smmu_domain->dev;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
+ ktime_t cur = ktime_get();
+
+ trace_tlbi_start(dev, 0);
if (!use_tlbiall)
writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
@@ -1024,6 +1041,7 @@
writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
arm_smmu_tlb_sync_context(cookie);
+ trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
}
static void arm_smmu_tlb_inv_context_s2(void *cookie)
@@ -1141,6 +1159,7 @@
list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
/* pages will be freed later (after being unassigned) */
+ list_del(&it->list);
kfree(it);
}
}
@@ -1152,8 +1171,19 @@
void *page;
struct arm_smmu_domain *smmu_domain = cookie;
- if (!arm_smmu_is_master_side_secure(smmu_domain))
+ if (!arm_smmu_is_master_side_secure(smmu_domain)) {
+ struct page *pg;
+ /* size is expected to be 4K with current configuration */
+ if (size == PAGE_SIZE) {
+ pg = list_first_entry_or_null(
+ &smmu_domain->nonsecure_pool, struct page, lru);
+ if (pg) {
+ list_del_init(&pg->lru);
+ return page_address(pg);
+ }
+ }
return alloc_pages_exact(size, gfp_mask);
+ }
page = arm_smmu_secure_pool_remove(smmu_domain, size);
if (page)
@@ -1254,7 +1284,7 @@
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
int flags, ret, tmp;
- u32 fsr, fsynr, resume;
+ u32 fsr, fsynr0, fsynr1, frsynra, resume;
unsigned long iova;
struct iommu_domain *domain = dev;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -1264,7 +1294,7 @@
void __iomem *gr1_base;
bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
phys_addr_t phys_soft;
- u32 frsynra;
+ uint64_t pte;
bool non_fatal_fault = !!(smmu_domain->attributes &
(1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
@@ -1291,8 +1321,9 @@
BUG();
}
- fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
- flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+ fsynr0 = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
+ fsynr1 = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR1);
+ flags = fsynr0 & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
if (fsr & FSR_TF)
flags |= IOMMU_FAULT_TRANSLATION;
if (fsr & FSR_PF)
@@ -1309,8 +1340,8 @@
tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
if (!tmp || (tmp == -EBUSY)) {
dev_dbg(smmu->dev,
- "Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
- iova, fsr, fsynr, cfg->cbndx);
+ "Context fault handled by client: iova=0x%08lx, cb=%d, fsr=0x%x, fsynr0=0x%x, fsynr1=0x%x\n",
+ iova, cfg->cbndx, fsr, fsynr0, fsynr1);
dev_dbg(smmu->dev,
"soft iova-to-phys=%pa\n", &phys_soft);
ret = IRQ_HANDLED;
@@ -1320,20 +1351,23 @@
fsr);
if (__ratelimit(&_rs)) {
dev_err(smmu->dev,
- "Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
- iova, fsr, fsynr, cfg->cbndx);
+ "Unhandled context fault: iova=0x%08lx, cb=%d, fsr=0x%x, fsynr0=0x%x, fsynr1=0x%x\n",
+ iova, cfg->cbndx, fsr, fsynr0, fsynr1);
dev_err(smmu->dev, "FAR = %016lx\n",
(unsigned long)iova);
dev_err(smmu->dev,
- "FSR = %08x [%s%s%s%s%s%s%s%s%s]\n",
+ "FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n",
fsr,
- (fsr & 0x02) ? "TF " : "",
+ (fsr & 0x02) ? (fsynr0 & 0x10 ?
+ "TF W " : "TF R ") : "",
(fsr & 0x04) ? "AFF " : "",
- (fsr & 0x08) ? "PF " : "",
+ (fsr & 0x08) ? (fsynr0 & 0x10 ?
+ "PF W " : "PF R ") : "",
(fsr & 0x10) ? "EF " : "",
(fsr & 0x20) ? "TLBMCF " : "",
(fsr & 0x40) ? "TLBLKF " : "",
(fsr & 0x80) ? "MHF " : "",
+ (fsr & 0x100) ? "UUT " : "",
(fsr & 0x40000000) ? "SS " : "",
(fsr & 0x80000000) ? "MULTI " : "");
dev_err(smmu->dev,
@@ -1342,6 +1376,10 @@
dev_err(smmu->dev,
"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
dev_name(smmu->dev));
+ else {
+ pte = arm_smmu_iova_to_pte(domain, iova);
+ dev_err(smmu->dev, "PTE = %016llx\n", pte);
+ }
if (phys_atos)
dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
&phys_atos);
@@ -1591,6 +1629,9 @@
reg |= SCTLR_HUPCF;
}
+ if (attributes & (1 << DOMAIN_ATTR_NO_CFRE))
+ reg &= ~SCTLR_CFRE;
+
if ((!(attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
!(attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) || !stage1)
reg |= SCTLR_M;
@@ -1796,12 +1837,6 @@
goto out_unlock;
cfg->cbndx = ret;
- if (smmu->version < ARM_SMMU_V2) {
- cfg->irptndx = atomic_inc_return(&smmu->irptndx);
- cfg->irptndx %= smmu->num_context_irqs;
- } else {
- cfg->irptndx = cfg->cbndx;
- }
if (arm_smmu_is_slave_side_secure(smmu_domain)) {
smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
@@ -1870,6 +1905,12 @@
if (ret)
goto out_clear_smmu;
+ if (smmu->version < ARM_SMMU_V2) {
+ cfg->irptndx = atomic_inc_return(&smmu->irptndx);
+ cfg->irptndx %= smmu->num_context_irqs;
+ } else {
+ cfg->irptndx = cfg->cbndx;
+ }
/*
* Request context fault interrupt. Do this last to avoid the
@@ -2000,6 +2041,7 @@
INIT_LIST_HEAD(&smmu_domain->unassign_list);
mutex_init(&smmu_domain->assign_lock);
INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
+ INIT_LIST_HEAD(&smmu_domain->nonsecure_pool);
arm_smmu_domain_reinit(smmu_domain);
return &smmu_domain->domain;
@@ -2424,6 +2466,60 @@
return 0;
}
+static void arm_smmu_prealloc_memory(struct arm_smmu_domain *smmu_domain,
+ size_t size, struct list_head *pool)
+{
+ int i;
+ u32 nr = 0;
+ struct page *page;
+
+ if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
+ arm_smmu_has_secure_vmid(smmu_domain))
+ return;
+
+ /* number of 2nd level pagetable entries */
+ nr += round_up(size, SZ_1G) >> 30;
+ /* number of 3rd level pagetabel entries */
+ nr += round_up(size, SZ_2M) >> 21; /* number of 3rd level pagetable entries */
+
+ /* Retry later with atomic allocation on error */
+ for (i = 0; i < nr; i++) {
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
+ if (!page)
+ break;
+ list_add(&page->lru, pool);
+ }
+}
+
+static void arm_smmu_prealloc_memory_sg(struct arm_smmu_domain *smmu_domain,
+ struct scatterlist *sgl, int nents,
+ struct list_head *pool)
+{
+ int i;
+ size_t size = 0;
+ struct scatterlist *sg;
+
+ if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
+ arm_smmu_has_secure_vmid(smmu_domain))
+ return;
+
+ for_each_sg(sgl, sg, nents, i)
+ size += sg->length;
+
+ arm_smmu_prealloc_memory(smmu_domain, size, pool);
+}
+
+static void arm_smmu_release_prealloc_memory(
+ struct arm_smmu_domain *smmu_domain, struct list_head *list)
+{
+ struct page *page, *tmp;
+
+ list_for_each_entry_safe(page, tmp, list, lru) {
+ list_del(&page->lru);
+ __free_pages(page, 0);
+ }
+}
+
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret;
@@ -2502,6 +2598,7 @@
unsigned long flags;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
+ LIST_HEAD(nonsecure_pool);
if (!ops)
return -ENODEV;
@@ -2509,15 +2606,19 @@
if (arm_smmu_is_slave_side_secure(smmu_domain))
return msm_secure_smmu_map(domain, iova, paddr, size, prot);
+ arm_smmu_prealloc_memory(smmu_domain, size, &nonsecure_pool);
arm_smmu_secure_domain_lock(smmu_domain);
spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+ list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
ret = ops->map(ops, iova, paddr, size, prot);
+ list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
arm_smmu_assign_table(smmu_domain);
arm_smmu_secure_domain_unlock(smmu_domain);
+ arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
return ret;
}
@@ -2595,6 +2696,7 @@
unsigned int idx_start, idx_end;
struct scatterlist *sg_start, *sg_end;
unsigned long __saved_iova_start;
+ LIST_HEAD(nonsecure_pool);
if (!ops)
return -ENODEV;
@@ -2602,9 +2704,8 @@
if (arm_smmu_is_slave_side_secure(smmu_domain))
return msm_secure_smmu_map_sg(domain, iova, sg, nents, prot);
- ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
- if (ret)
- return ret;
+ arm_smmu_prealloc_memory_sg(smmu_domain, sg, nents, &nonsecure_pool);
+ arm_smmu_secure_domain_lock(smmu_domain);
__saved_iova_start = iova;
idx_start = idx_end = 0;
@@ -2622,9 +2723,12 @@
}
spin_lock_irqsave(&smmu_domain->cb_lock, flags);
+ list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
prot, &size);
+ list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
+
/* Returns 0 on error */
if (!ret) {
size_to_unmap = iova + size - __saved_iova_start;
@@ -2643,7 +2747,8 @@
arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
iova = __saved_iova_start;
}
- arm_smmu_domain_power_off(domain, smmu_domain->smmu);
+ arm_smmu_secure_domain_unlock(smmu_domain);
+ arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
return iova - __saved_iova_start;
}
@@ -3094,6 +3199,11 @@
& (1 << DOMAIN_ATTR_CB_STALL_DISABLE));
ret = 0;
break;
+ case DOMAIN_ATTR_NO_CFRE:
+ *((int *)data) = !!(smmu_domain->attributes
+ & (1 << DOMAIN_ATTR_NO_CFRE));
+ ret = 0;
+ break;
case DOMAIN_ATTR_QCOM_MMU500_ERRATA_MIN_IOVA_ALIGN:
*((int *)data) = smmu_domain->qsmmuv500_errata1_min_iova_align;
ret = 0;
@@ -3238,16 +3348,6 @@
ret = 0;
break;
case DOMAIN_ATTR_USE_UPSTREAM_HINT:
- /* can't be changed while attached */
- if (smmu_domain->smmu != NULL) {
- ret = -EBUSY;
- break;
- }
- if (*((int *)data))
- smmu_domain->attributes |=
- 1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
- ret = 0;
- break;
case DOMAIN_ATTR_USE_LLC_NWA:
/* can't be changed while attached */
if (smmu_domain->smmu != NULL) {
@@ -3256,7 +3356,7 @@
}
if (*((int *)data))
smmu_domain->attributes |=
- 1 << DOMAIN_ATTR_USE_LLC_NWA;
+ 1 << attr;
ret = 0;
break;
case DOMAIN_ATTR_EARLY_MAP: {
@@ -3278,9 +3378,11 @@
break;
}
case DOMAIN_ATTR_BITMAP_IOVA_ALLOCATOR:
+ case DOMAIN_ATTR_CB_STALL_DISABLE:
+ case DOMAIN_ATTR_NO_CFRE:
if (*((int *)data))
smmu_domain->attributes |=
- 1 << DOMAIN_ATTR_BITMAP_IOVA_ALLOCATOR;
+ 1 << attr;
ret = 0;
break;
case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
@@ -3303,13 +3405,6 @@
ret = 0;
break;
}
-
- case DOMAIN_ATTR_CB_STALL_DISABLE:
- if (*((int *)data))
- smmu_domain->attributes |=
- 1 << DOMAIN_ATTR_CB_STALL_DISABLE;
- ret = 0;
- break;
default:
ret = -ENODEV;
}
@@ -3723,7 +3818,7 @@
for (i = 0; i < smmu->num_context_banks; ++i) {
cb_base = ARM_SMMU_CB(smmu, i);
- writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+ arm_smmu_write_context_bank(smmu, i, 0);
writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
/*
* Disable MMU-500's not-particularly-beneficial next-page
@@ -4680,8 +4775,15 @@
static int __maybe_unused arm_smmu_pm_resume(struct device *dev)
{
struct arm_smmu_device *smmu = dev_get_drvdata(dev);
+ int ret;
+
+ ret = arm_smmu_power_on(smmu->pwr);
+ if (ret)
+ return ret;
arm_smmu_device_reset(smmu);
+ arm_smmu_power_off(smmu->pwr);
+
return 0;
}
@@ -4692,6 +4794,7 @@
.name = "arm-smmu",
.of_match_table = of_match_ptr(arm_smmu_of_match),
.pm = &arm_smmu_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = arm_smmu_device_dt_probe,
.remove = arm_smmu_device_remove,
@@ -4702,10 +4805,12 @@
{
static bool registered;
int ret = 0;
+ ktime_t cur;
if (registered)
return 0;
+ cur = ktime_get();
ret = platform_driver_register(&qsmmuv500_tbu_driver);
if (ret)
return ret;
@@ -4715,6 +4820,8 @@
ret = register_iommu_sec_ptbl();
#endif
registered = !ret;
+ trace_smmu_init(ktime_us_delta(ktime_get(), cur));
+
return ret;
}
@@ -4820,6 +4927,12 @@
u32 halt, fsr, sctlr_orig, sctlr, status;
void __iomem *base, *cb_base;
+ if (of_property_read_bool(tbu->dev->of_node,
+ "qcom,opt-out-tbu-halting")) {
+ dev_notice(tbu->dev, "TBU opted-out for halting!\n");
+ return -EBUSY;
+ }
+
spin_lock_irqsave(&tbu->halt_lock, flags);
if (tbu->halt_count) {
tbu->halt_count++;
@@ -5040,8 +5153,8 @@
val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
if (fsr & FSR_FAULT) {
- dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx\n",
- fsr);
+ dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx, SID=0x%x\n",
+ fsr, sid);
/* Clear pending interrupts */
writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
@@ -5184,8 +5297,9 @@
* Prefetch only works properly if the start and end of all
* buffers in the page table are aligned to ARM_SMMU_MIN_IOVA_ALIGN.
*/
- if ((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
- QSMMUV500_ACTLR_DEEP_PREFETCH_MASK)
+ if (((iommudata->actlr >> QSMMUV500_ACTLR_DEEP_PREFETCH_SHIFT) &
+ QSMMUV500_ACTLR_DEEP_PREFETCH_MASK) &&
+ (smmu->options & ARM_SMMU_OPT_MIN_IOVA_ALIGN))
smmu_domain->qsmmuv500_errata1_min_iova_align = true;
/*
@@ -5275,6 +5389,9 @@
data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
smmu->archdata = data;
+ if (arm_smmu_is_static_cb(smmu))
+ return 0;
+
ret = qsmmuv500_read_actlr_tbl(smmu);
if (ret)
return ret;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index bb2e600..f92d411 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -336,6 +336,7 @@
return 0;
}
+ iovad->end_pfn = end_pfn;
init_iova_domain(iovad, 1UL << order, base_pfn);
if (!dev)
return 0;
@@ -425,7 +426,7 @@
* rb_tree.
*/
limit = min_t(dma_addr_t, DMA_BIT_MASK(32) >> shift,
- iovad->dma_32bit_pfn);
+ iovad->end_pfn);
/* Try to get PCI devices a SAC address */
if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
@@ -433,7 +434,7 @@
if (!iova) {
limit = min_t(dma_addr_t, dma_limit >> shift,
- iovad->dma_32bit_pfn);
+ iovad->end_pfn);
iova = alloc_iova_fast(iovad, iova_len, limit, true);
}
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 7099644..0069a05 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
+#include <trace/events/iommu.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/arm-smmu-errata.h>
@@ -398,6 +399,8 @@
fast_dmac_clean_range(mapping, pmd, pmd + nptes);
spin_unlock_irqrestore(&mapping->lock, flags);
+
+ trace_map(mapping->domain, iova, phys_to_map, len, prot);
return iova + offset_from_phys_to_map;
fail_free_iova:
@@ -429,6 +432,8 @@
fast_dmac_clean_range(mapping, pmd, pmd + nptes);
__fast_smmu_free_iova(mapping, iova - offset, len);
spin_unlock_irqrestore(&mapping->lock, flags);
+
+ trace_unmap(mapping->domain, iova - offset, len, len);
}
static void fast_smmu_sync_single_for_cpu(struct device *dev,
@@ -459,7 +464,8 @@
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
- return -EINVAL;
+ /* 0 indicates error */
+ return 0;
}
static void fast_smmu_unmap_sg(struct device *dev,
@@ -914,7 +920,7 @@
int fast_smmu_init_mapping(struct device *dev,
struct dma_iommu_mapping *mapping)
{
- int err;
+ int err = 0;
struct iommu_domain *domain = mapping->domain;
struct iommu_pgtbl_info info;
u64 size = (u64)mapping->bits << PAGE_SHIFT;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 8e7f539..05f6494 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -213,6 +213,7 @@
unsigned long bits_per_level;
void *pgd;
+ void *pgd_ttbr1;
};
typedef u64 arm_lpae_iopte;
@@ -406,21 +407,6 @@
BUG_ON(!suppress_map_failures);
return -EEXIST;
}
- if (iopte_leaf(pte, lvl)) {
- WARN_ON(!selftest_running);
- return -EEXIST;
- } else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
- /*
- * We need to unmap and free the old table before
- * overwriting it with a block entry.
- */
- arm_lpae_iopte *tblp;
- size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
-
- tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
- if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
- return -EINVAL;
- }
__arm_lpae_init_pte(data, paddr, prot, lvl, ptep, flush);
@@ -754,6 +740,8 @@
struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
+ __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data),
+ data->pgd_ttbr1);
kfree(data);
}
@@ -1219,14 +1207,22 @@
if (!data->pgd)
goto out_free_data;
+ data->pgd_ttbr1 = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL,
+ cfg, cookie);
+ if (!data->pgd_ttbr1)
+ goto out_free_pgd;
+
/* Ensure the empty pgd is visible before any actual TTBR write */
wmb();
/* TTBRs */
cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
- cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
+ cfg->arm_lpae_s1_cfg.ttbr[1] = virt_to_phys(data->pgd_ttbr1);
return &data->iop;
+out_free_pgd:
+ __arm_lpae_free_pages(data->pgd, data->pgd_size, cfg, cookie);
+
out_free_data:
kfree(data);
return NULL;
diff --git a/drivers/iommu/io-pgtable-msm-secure.c b/drivers/iommu/io-pgtable-msm-secure.c
new file mode 100644
index 0000000..ade9d41
--- /dev/null
+++ b/drivers/iommu/io-pgtable-msm-secure.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "io-pgtable-msm-secure: " fmt
+
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <soc/qcom/scm.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+
+#include "io-pgtable.h"
+
+#define IOMMU_SECURE_PTBL_SIZE 3
+#define IOMMU_SECURE_PTBL_INIT 4
+#define IOMMU_SECURE_MAP2_FLAT 0x12
+#define IOMMU_SECURE_UNMAP2_FLAT 0x13
+#define IOMMU_TLBINVAL_FLAG 0x00000001
+
+#define io_pgtable_to_data(x) \
+ container_of((x), struct msm_secure_io_pgtable, iop)
+
+#define io_pgtable_ops_to_pgtable(x) \
+ container_of((x), struct io_pgtable, ops)
+
+#define io_pgtable_ops_to_data(x) \
+ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+
+struct msm_secure_io_pgtable {
+ struct io_pgtable iop;
+ /* lock required while operating on page tables */
+ struct mutex pgtbl_lock;
+};
+
+int msm_iommu_sec_pgtbl_init(void)
+{
+ int psize[2] = {0, 0};
+ unsigned int spare = 0;
+ int ret, ptbl_ret = 0;
+ struct device dev = {0};
+ void *cpu_addr;
+ dma_addr_t paddr;
+ unsigned long attrs = 0;
+
+ if (is_scm_armv8()) {
+ struct scm_desc desc = {0};
+
+ desc.args[0] = spare;
+ desc.arginfo = SCM_ARGS(1);
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+ IOMMU_SECURE_PTBL_SIZE), &desc);
+ psize[0] = desc.ret[0];
+ psize[1] = desc.ret[1];
+ if (ret || psize[1]) {
+ pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
+ return ret;
+ }
+ }
+
+ /* Now allocate memory for the secure page tables */
+ attrs = DMA_ATTR_NO_KERNEL_MAPPING;
+ dev.coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+ arch_setup_dma_ops(&dev, 0, 0, NULL, 1);
+ cpu_addr = dma_alloc_attrs(&dev, psize[0], &paddr, GFP_KERNEL, attrs);
+ if (!cpu_addr) {
+ pr_err("%s: Failed to allocate %d bytes for PTBL\n",
+ __func__, psize[0]);
+ return -ENOMEM;
+ }
+
+ if (is_scm_armv8()) {
+ struct scm_desc desc = {0};
+
+ desc.args[0] = paddr;
+ desc.args[1] = psize[0];
+ desc.args[2] = 0;
+ desc.arginfo = SCM_ARGS(3, SCM_RW, SCM_VAL, SCM_VAL);
+
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+ IOMMU_SECURE_PTBL_INIT), &desc);
+ ptbl_ret = desc.ret[0];
+
+ if (ret) {
+ pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
+ return ret;
+ }
+
+ if (ptbl_ret) {
+ pr_err("scm call IOMMU_SECURE_PTBL_INIT extended ret fail\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_iommu_sec_pgtbl_init);
+
+static int msm_secure_map(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t size, int iommu_prot)
+{
+ struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ void *flush_va, *flush_va_end;
+ struct scm_desc desc = {0};
+ int ret = -EINVAL;
+ u32 resp;
+
+ if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(paddr, SZ_1M) ||
+ !IS_ALIGNED(size, SZ_1M))
+ return -EINVAL;
+
+ desc.args[0] = virt_to_phys(&paddr);
+ desc.args[1] = 1;
+ desc.args[2] = size;
+ desc.args[3] = cfg->arm_msm_secure_cfg.sec_id;
+ desc.args[4] = cfg->arm_msm_secure_cfg.cbndx;
+ desc.args[5] = iova;
+ desc.args[6] = size;
+ desc.args[7] = 0;
+
+ flush_va = &paddr;
+ flush_va_end = (void *)
+ (((unsigned long) flush_va) + sizeof(phys_addr_t));
+
+ mutex_lock(&data->pgtbl_lock);
+ /*
+ * Ensure that the buffer is in RAM by the time it gets to TZ
+ */
+ dmac_clean_range(flush_va, flush_va_end);
+
+ desc.arginfo = SCM_ARGS(8, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
+ SCM_VAL, SCM_VAL, SCM_VAL);
+
+ if (is_scm_armv8()) {
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+ IOMMU_SECURE_MAP2_FLAT), &desc);
+ resp = desc.ret[0];
+ }
+ mutex_unlock(&data->pgtbl_lock);
+
+ if (ret || resp)
+ return -EINVAL;
+
+ return 0;
+}
+
+static dma_addr_t msm_secure_get_phys_addr(struct scatterlist *sg)
+{
+ /*
+ * Try sg_dma_address first so that we can
+ * map carveout regions that do not have a
+ * struct page associated with them.
+ */
+ dma_addr_t pa = sg_dma_address(sg);
+
+ if (pa == 0)
+ pa = sg_phys(sg);
+ return pa;
+}
+
+static int msm_secure_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents,
+ int iommu_prot, size_t *size)
+{
+ struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ int ret = -EINVAL;
+ struct scatterlist *tmp, *sgiter;
+ dma_addr_t *pa_list = 0;
+ unsigned int cnt, offset = 0, chunk_offset = 0;
+ dma_addr_t pa;
+ void *flush_va, *flush_va_end;
+ unsigned long len = 0;
+ struct scm_desc desc = {0};
+ int i;
+ u32 resp;
+
+ for_each_sg(sg, tmp, nents, i)
+ len += tmp->length;
+
+ if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+ return -EINVAL;
+
+ if (sg->length == len) {
+ cnt = 1;
+ pa = msm_secure_get_phys_addr(sg);
+ if (!IS_ALIGNED(pa, SZ_1M))
+ return -EINVAL;
+
+ desc.args[0] = virt_to_phys(&pa);
+ desc.args[1] = cnt;
+ desc.args[2] = len;
+ flush_va = &pa;
+ } else {
+ sgiter = sg;
+ if (!IS_ALIGNED(sgiter->length, SZ_1M))
+ return -EINVAL;
+ cnt = sg->length / SZ_1M;
+ while ((sgiter = sg_next(sgiter))) {
+ if (!IS_ALIGNED(sgiter->length, SZ_1M))
+ return -EINVAL;
+ cnt += sgiter->length / SZ_1M;
+ }
+
+ pa_list = kmalloc_array(cnt, sizeof(*pa_list), GFP_KERNEL);
+ if (!pa_list)
+ return -ENOMEM;
+
+ sgiter = sg;
+ cnt = 0;
+ pa = msm_secure_get_phys_addr(sgiter);
+ while (offset < len) {
+
+ if (!IS_ALIGNED(pa, SZ_1M)) {
+ kfree(pa_list);
+ return -EINVAL;
+ }
+
+ pa_list[cnt] = pa + chunk_offset;
+ chunk_offset += SZ_1M;
+ offset += SZ_1M;
+ cnt++;
+
+ if (chunk_offset >= sgiter->length && offset < len) {
+ chunk_offset = 0;
+ sgiter = sg_next(sgiter);
+ pa = msm_secure_get_phys_addr(sgiter);
+ }
+ }
+
+ desc.args[0] = virt_to_phys(pa_list);
+ desc.args[1] = cnt;
+ desc.args[2] = SZ_1M;
+ flush_va = pa_list;
+ }
+
+ desc.args[3] = cfg->arm_msm_secure_cfg.sec_id;
+ desc.args[4] = cfg->arm_msm_secure_cfg.cbndx;
+ desc.args[5] = iova;
+ desc.args[6] = len;
+ desc.args[7] = 0;
+
+ desc.arginfo = SCM_ARGS(8, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
+ SCM_VAL, SCM_VAL, SCM_VAL);
+
+ /*
+ * Ensure that the buffer is in RAM by the time it gets to TZ
+ */
+
+ flush_va_end = (void *) (((unsigned long) flush_va) +
+ (cnt * sizeof(*pa_list)));
+
+ mutex_lock(&data->pgtbl_lock);
+ dmac_clean_range(flush_va, flush_va_end);
+
+ if (is_scm_armv8()) {
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+ IOMMU_SECURE_MAP2_FLAT), &desc);
+ resp = desc.ret[0];
+
+ if (ret || resp)
+ ret = -EINVAL;
+ else
+ ret = len;
+ }
+ mutex_unlock(&data->pgtbl_lock);
+
+ kfree(pa_list);
+ return ret;
+}
+
+static size_t msm_secure_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t len)
+{
+ struct msm_secure_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ int ret = -EINVAL;
+ struct scm_desc desc = {0};
+
+ if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+ return ret;
+
+ desc.args[0] = cfg->arm_msm_secure_cfg.sec_id;
+ desc.args[1] = cfg->arm_msm_secure_cfg.cbndx;
+ desc.args[2] = iova;
+ desc.args[3] = len;
+ desc.args[4] = IOMMU_TLBINVAL_FLAG;
+ desc.arginfo = SCM_ARGS(5);
+
+ mutex_lock(&data->pgtbl_lock);
+ if (is_scm_armv8()) {
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+ IOMMU_SECURE_UNMAP2_FLAT), &desc);
+
+ if (!ret)
+ ret = len;
+ }
+ mutex_unlock(&data->pgtbl_lock);
+ return ret;
+}
+
+static phys_addr_t msm_secure_iova_to_phys(struct io_pgtable_ops *ops,
+ unsigned long iova)
+{
+ return -EINVAL;
+}
+
+static struct msm_secure_io_pgtable *
+msm_secure_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
+{
+ struct msm_secure_io_pgtable *data;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ data->iop.ops = (struct io_pgtable_ops) {
+ .map = msm_secure_map,
+ .map_sg = msm_secure_map_sg,
+ .unmap = msm_secure_unmap,
+ .iova_to_phys = msm_secure_iova_to_phys,
+ };
+ mutex_init(&data->pgtbl_lock);
+
+ return data;
+}
+
+static struct io_pgtable *
+msm_secure_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct msm_secure_io_pgtable *data =
+ msm_secure_alloc_pgtable_data(cfg);
+
+ return &data->iop;
+}
+
+static void msm_secure_free_pgtable(struct io_pgtable *iop)
+{
+ struct msm_secure_io_pgtable *data = io_pgtable_to_data(iop);
+
+ kfree(data);
+}
+
+struct io_pgtable_init_fns io_pgtable_arm_msm_secure_init_fns = {
+ .alloc = msm_secure_alloc_pgtable,
+ .free = msm_secure_free_pgtable,
+};
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index ccc88b1..62759e2 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -29,7 +29,7 @@
#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>
-#if defined(CONFIG_IOMMU_DEBUG_TRACKING) || defined(CONFIG_IOMMU_TESTS)
+#if defined(CONFIG_IOMMU_TESTS)
static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
{
@@ -103,20 +103,25 @@
struct iommu_debug_attachment *attach;
struct iommu_group *group;
- group = iommu_group_get(dev);
+ group = dev->iommu_group;
if (!group)
return;
+ mutex_lock(&iommu_debug_attachments_lock);
+ list_for_each_entry(attach, &iommu_debug_attachments, list)
+ if ((attach->domain == domain) && (attach->group == group))
+ goto out;
+
attach = kzalloc(sizeof(*attach), GFP_KERNEL);
if (!attach)
- return;
+ goto out;
attach->domain = domain;
attach->group = group;
INIT_LIST_HEAD(&attach->list);
- mutex_lock(&iommu_debug_attachments_lock);
list_add(&attach->list, &iommu_debug_attachments);
+out:
mutex_unlock(&iommu_debug_attachments_lock);
}
@@ -129,7 +134,6 @@
if (it->domain != domain)
continue;
list_del(&it->list);
- iommu_group_put(it->group);
kfree(it);
}
@@ -167,6 +171,8 @@
u64 phys;
size_t len;
struct list_head list;
+ struct mutex clk_lock;
+ unsigned int clk_count;
};
static int iommu_debug_build_phoney_sg_table(struct device *dev,
@@ -1275,7 +1281,7 @@
}
ddev->domain->is_debug_domain = true;
-
+ val = VMID_CP_CAMERA;
if (is_secure && iommu_domain_set_attr(ddev->domain,
DOMAIN_ATTR_SECURE_VMID,
&val)) {
@@ -1568,6 +1574,10 @@
ssize_t retval;
size_t buflen;
+ if (kptr_restrict != 0) {
+ pr_err("kptr_restrict needs to be disabled.\n");
+ return -EPERM;
+ }
if (!dev->archdata.mapping) {
pr_err("No mapping. Did you already attach?\n");
return -EINVAL;
@@ -1635,6 +1645,10 @@
ssize_t retval;
size_t buflen;
+ if (kptr_restrict != 0) {
+ pr_err("kptr_restrict needs to be disabled.\n");
+ return -EPERM;
+ }
if (!ddev->domain) {
pr_err("No domain. Did you already attach?\n");
return -EINVAL;
@@ -1683,6 +1697,10 @@
ssize_t retval;
size_t buflen;
+ if (kptr_restrict != 0) {
+ pr_err("kptr_restrict needs to be disabled.\n");
+ return -EPERM;
+ }
if (!dev->archdata.mapping) {
pr_err("No mapping. Did you already attach?\n");
return -EINVAL;
@@ -2129,20 +2147,34 @@
return -EFAULT;
}
+ mutex_lock(&ddev->clk_lock);
switch (buf) {
case '0':
+ if (ddev->clk_count == 0) {
+ dev_err(dev, "Config clocks already disabled\n");
+ break;
+ }
+
+ if (--ddev->clk_count > 0)
+ break;
+
dev_err(dev, "Disabling config clocks\n");
iommu_disable_config_clocks(ddev->domain);
break;
case '1':
+ if (ddev->clk_count++ > 0)
+ break;
+
dev_err(dev, "Enabling config clocks\n");
if (iommu_enable_config_clocks(ddev->domain))
dev_err(dev, "Failed!\n");
break;
default:
dev_err(dev, "Invalid value. Should be 0 or 1.\n");
+ mutex_unlock(&ddev->clk_lock);
return -EINVAL;
}
+ mutex_unlock(&ddev->clk_lock);
return count;
}
@@ -2192,6 +2224,9 @@
if (!of_find_property(dev->of_node, "iommus", NULL))
return 0;
+ if (!of_device_is_compatible(dev->of_node, "iommu-debug-test"))
+ return 0;
+
/* Hold a reference count */
if (!iommu_group_get(dev))
return 0;
@@ -2200,6 +2235,7 @@
if (!ddev)
return -ENODEV;
+ mutex_init(&ddev->clk_lock);
ddev->dev = dev;
dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
if (!dir) {
diff --git a/drivers/iommu/iommu-debugfs.c b/drivers/iommu/iommu-debugfs.c
index 3b1bf88..ced14f8 100644
--- a/drivers/iommu/iommu-debugfs.c
+++ b/drivers/iommu/iommu-debugfs.c
@@ -31,6 +31,7 @@
{
if (!iommu_debugfs_dir) {
iommu_debugfs_dir = debugfs_create_dir("iommu", NULL);
+ iommu_debugfs_top = iommu_debugfs_dir;
pr_warn("\n");
pr_warn("*************************************************************\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 67fe773..9781ca7 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -635,6 +635,7 @@
if (ret)
goto err_put_group;
+
/* Notify any listeners about change to group. */
blocking_notifier_call_chain(&group->notifier,
IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
@@ -1304,6 +1305,7 @@
/* Assume all sizes by default; the driver may override this later */
domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
domain->is_debug_domain = false;
+ memset(domain->name, 0, IOMMU_DOMAIN_NAME_LEN);
return domain;
}
@@ -1336,6 +1338,11 @@
if (!ret) {
trace_attach_device_to_domain(dev);
iommu_debug_attach_device(domain, dev);
+
+ if (!strnlen(domain->name, IOMMU_DOMAIN_NAME_LEN)) {
+ strlcpy(domain->name, dev_name(dev),
+ IOMMU_DOMAIN_NAME_LEN);
+ }
}
return ret;
}
@@ -1630,7 +1637,7 @@
if (ret)
iommu_unmap(domain, orig_iova, orig_size - size);
else
- trace_map(orig_iova, orig_paddr, orig_size);
+ trace_map(domain, orig_iova, orig_paddr, orig_size, prot);
return ret;
}
@@ -1692,7 +1699,7 @@
if (sync && ops->iotlb_sync)
ops->iotlb_sync(domain);
- trace_unmap(orig_iova, size, unmapped);
+ trace_unmap(domain, orig_iova, size, unmapped);
return unmapped;
}
@@ -1710,6 +1717,18 @@
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
+size_t iommu_map_sg(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot)
+{
+ size_t mapped;
+
+ mapped = domain->ops->map_sg(domain, iova, sg, nents, prot);
+ trace_map_sg(domain, iova, mapped, prot);
+ return mapped;
+}
+EXPORT_SYMBOL(iommu_map_sg);
+
size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot)
{
diff --git a/drivers/iommu/msm_dma_iommu_mapping.c b/drivers/iommu/msm_dma_iommu_mapping.c
index 883f015..3122536 100644
--- a/drivers/iommu/msm_dma_iommu_mapping.c
+++ b/drivers/iommu/msm_dma_iommu_mapping.c
@@ -191,7 +191,7 @@
mutex_lock(&iommu_meta->lock);
iommu_map = msm_iommu_lookup(iommu_meta, dev);
if (!iommu_map) {
- iommu_map = kmalloc(sizeof(*iommu_map), GFP_ATOMIC);
+ iommu_map = kmalloc(sizeof(*iommu_map), GFP_KERNEL);
if (!iommu_map) {
ret = -ENOMEM;
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index ad3fcad..2b83a41 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -49,7 +49,13 @@
bool
config OF_KOBJ
+ bool "Display devicetree in sysfs"
def_bool SYSFS
+ help
+ Some embedded platforms have no need to display the devicetree
+ nodes and properties in sysfs. Disabling this option will save
+ a small amount of memory, as well as decrease boot time. By
+ default this option will be enabled if SYSFS is enabled.
# Hardly any platforms need this. It is safe to select, but only do so if you
# need it.
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 895c83e..779d6228 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -20,6 +20,7 @@
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
+#include <linux/kmemleak.h>
#define MAX_RESERVED_REGIONS 32
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
@@ -50,8 +51,10 @@
}
*res_base = base;
- if (nomap)
+ if (nomap) {
+ kmemleak_ignore_phys(base);
return memblock_remove(base, size);
+ }
return 0;
}
#else
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 4e18ba9..635dab9 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -43,7 +43,9 @@
static const char * const power_supply_type_text[] = {
"Unknown", "Battery", "UPS", "Mains", "USB",
"USB_DCP", "USB_CDP", "USB_ACA", "USB_C",
- "USB_PD", "USB_PD_DRP", "BrickID"
+ "USB_PD", "USB_PD_DRP", "BrickID",
+ "USB_HVDCP", "USB_HVDCP_3", "Wireless", "USB_FLOAT",
+ "BMS", "Parallel", "Main", "Wipower", "USB_C_UFP", "USB_C_DFP",
};
static const char * const power_supply_usb_type_text[] = {
@@ -56,13 +58,14 @@
};
static const char * const power_supply_charge_type_text[] = {
- "Unknown", "N/A", "Trickle", "Fast"
+ "Unknown", "N/A", "Trickle", "Fast", "Taper"
};
static const char * const power_supply_health_text[] = {
"Unknown", "Good", "Overheat", "Dead", "Over voltage",
"Unspecified failure", "Cold", "Watchdog timer expire",
- "Safety timer expire"
+ "Safety timer expire",
+ "Warm", "Cool", "Hot"
};
static const char * const power_supply_technology_text[] = {
@@ -78,6 +81,23 @@
"Unknown", "System", "Device"
};
+static const char * const power_supply_usbc_text[] = {
+ "Nothing attached", "Sink attached", "Powered cable w/ sink",
+ "Debug Accessory", "Audio Adapter", "Powered cable w/o sink",
+ "Source attached (default current)",
+ "Source attached (medium current)",
+ "Source attached (high current)",
+ "Non compliant",
+};
+
+static const char * const power_supply_usbc_pr_text[] = {
+ "none", "dual power role", "sink", "source"
+};
+
+static const char * const power_supply_typec_src_rp_text[] = {
+ "Rp-Default", "Rp-1.5A", "Rp-3A"
+};
+
static ssize_t power_supply_show_usb_type(struct device *dev,
enum power_supply_usb_type *usb_types,
ssize_t num_usb_types,
@@ -159,6 +179,7 @@
power_supply_capacity_level_text[value.intval]);
break;
case POWER_SUPPLY_PROP_TYPE:
+ case POWER_SUPPLY_PROP_REAL_TYPE:
ret = sprintf(buf, "%s\n",
power_supply_type_text[value.intval]);
break;
@@ -171,6 +192,23 @@
ret = sprintf(buf, "%s\n",
power_supply_scope_text[value.intval]);
break;
+ case POWER_SUPPLY_PROP_TYPEC_MODE:
+ ret = sprintf(buf, "%s\n",
+ power_supply_usbc_text[value.intval]);
+ break;
+ case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+ ret = sprintf(buf, "%s\n",
+ power_supply_usbc_pr_text[value.intval]);
+ break;
+ case POWER_SUPPLY_PROP_TYPEC_SRC_RP:
+ ret = sprintf(buf, "%s\n",
+ power_supply_typec_src_rp_text[value.intval]);
+ break;
+ case POWER_SUPPLY_PROP_DIE_HEALTH:
+ case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
+ ret = sprintf(buf, "%s\n",
+ power_supply_health_text[value.intval]);
+ break;
case POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT:
ret = sprintf(buf, "%lld\n", value.int64val);
break;
@@ -309,12 +347,112 @@
POWER_SUPPLY_ATTR(usb_hc),
POWER_SUPPLY_ATTR(usb_otg),
POWER_SUPPLY_ATTR(charge_enabled),
+ POWER_SUPPLY_ATTR(set_ship_mode),
+ POWER_SUPPLY_ATTR(real_type),
+ POWER_SUPPLY_ATTR(charge_now_raw),
+ POWER_SUPPLY_ATTR(charge_now_error),
+ POWER_SUPPLY_ATTR(capacity_raw),
+ POWER_SUPPLY_ATTR(battery_charging_enabled),
+ POWER_SUPPLY_ATTR(charging_enabled),
+ POWER_SUPPLY_ATTR(step_charging_enabled),
+ POWER_SUPPLY_ATTR(step_charging_step),
+ POWER_SUPPLY_ATTR(pin_enabled),
+ POWER_SUPPLY_ATTR(input_suspend),
+ POWER_SUPPLY_ATTR(input_voltage_regulation),
+ POWER_SUPPLY_ATTR(input_current_max),
+ POWER_SUPPLY_ATTR(input_current_trim),
+ POWER_SUPPLY_ATTR(input_current_settled),
+ POWER_SUPPLY_ATTR(input_voltage_settled),
+ POWER_SUPPLY_ATTR(bypass_vchg_loop_debouncer),
+ POWER_SUPPLY_ATTR(charge_counter_shadow),
+ POWER_SUPPLY_ATTR(hi_power),
+ POWER_SUPPLY_ATTR(low_power),
+ POWER_SUPPLY_ATTR(temp_cool),
+ POWER_SUPPLY_ATTR(temp_warm),
+ POWER_SUPPLY_ATTR(temp_cold),
+ POWER_SUPPLY_ATTR(temp_hot),
+ POWER_SUPPLY_ATTR(system_temp_level),
+ POWER_SUPPLY_ATTR(resistance),
+ POWER_SUPPLY_ATTR(resistance_capacitive),
+ POWER_SUPPLY_ATTR(resistance_id),
+ POWER_SUPPLY_ATTR(resistance_now),
+ POWER_SUPPLY_ATTR(flash_current_max),
+ POWER_SUPPLY_ATTR(update_now),
+ POWER_SUPPLY_ATTR(esr_count),
+ POWER_SUPPLY_ATTR(buck_freq),
+ POWER_SUPPLY_ATTR(boost_current),
+ POWER_SUPPLY_ATTR(safety_timer_enabled),
+ POWER_SUPPLY_ATTR(charge_done),
+ POWER_SUPPLY_ATTR(flash_active),
+ POWER_SUPPLY_ATTR(flash_trigger),
+ POWER_SUPPLY_ATTR(force_tlim),
+ POWER_SUPPLY_ATTR(dp_dm),
+ POWER_SUPPLY_ATTR(input_current_limited),
+ POWER_SUPPLY_ATTR(input_current_now),
+ POWER_SUPPLY_ATTR(charge_qnovo_enable),
+ POWER_SUPPLY_ATTR(current_qnovo),
+ POWER_SUPPLY_ATTR(voltage_qnovo),
+ POWER_SUPPLY_ATTR(rerun_aicl),
+ POWER_SUPPLY_ATTR(cycle_count_id),
+ POWER_SUPPLY_ATTR(safety_timer_expired),
+ POWER_SUPPLY_ATTR(restricted_charging),
+ POWER_SUPPLY_ATTR(current_capability),
+ POWER_SUPPLY_ATTR(typec_mode),
+ POWER_SUPPLY_ATTR(typec_cc_orientation),
+ POWER_SUPPLY_ATTR(typec_power_role),
+ POWER_SUPPLY_ATTR(typec_src_rp),
+ POWER_SUPPLY_ATTR(pd_allowed),
+ POWER_SUPPLY_ATTR(pd_active),
+ POWER_SUPPLY_ATTR(pd_in_hard_reset),
+ POWER_SUPPLY_ATTR(pd_current_max),
+ POWER_SUPPLY_ATTR(pd_usb_suspend_supported),
+ POWER_SUPPLY_ATTR(charger_temp),
+ POWER_SUPPLY_ATTR(charger_temp_max),
+ POWER_SUPPLY_ATTR(parallel_disable),
+ POWER_SUPPLY_ATTR(pe_start),
+ POWER_SUPPLY_ATTR(soc_reporting_ready),
+ POWER_SUPPLY_ATTR(debug_battery),
+ POWER_SUPPLY_ATTR(fcc_delta),
+ POWER_SUPPLY_ATTR(icl_reduction),
+ POWER_SUPPLY_ATTR(parallel_mode),
+ POWER_SUPPLY_ATTR(die_health),
+ POWER_SUPPLY_ATTR(connector_health),
+ POWER_SUPPLY_ATTR(ctm_current_max),
+ POWER_SUPPLY_ATTR(hw_current_max),
+ POWER_SUPPLY_ATTR(pr_swap),
+ POWER_SUPPLY_ATTR(cc_step),
+ POWER_SUPPLY_ATTR(cc_step_sel),
+ POWER_SUPPLY_ATTR(sw_jeita_enabled),
+ POWER_SUPPLY_ATTR(pd_voltage_max),
+ POWER_SUPPLY_ATTR(pd_voltage_min),
+ POWER_SUPPLY_ATTR(sdp_current_max),
+ POWER_SUPPLY_ATTR(connector_type),
+ POWER_SUPPLY_ATTR(parallel_batfet_mode),
+ POWER_SUPPLY_ATTR(parallel_fcc_max),
+ POWER_SUPPLY_ATTR(min_icl),
+ POWER_SUPPLY_ATTR(moisture_detected),
+ POWER_SUPPLY_ATTR(batt_profile_version),
+ POWER_SUPPLY_ATTR(batt_full_current),
+ POWER_SUPPLY_ATTR(recharge_soc),
+ POWER_SUPPLY_ATTR(hvdcp_opti_allowed),
+ POWER_SUPPLY_ATTR(smb_en_mode),
+ POWER_SUPPLY_ATTR(smb_en_reason),
+ POWER_SUPPLY_ATTR(esr_actual),
+ POWER_SUPPLY_ATTR(esr_nominal),
+ POWER_SUPPLY_ATTR(soh),
+ POWER_SUPPLY_ATTR(clear_soh),
+ POWER_SUPPLY_ATTR(force_recharge),
+ POWER_SUPPLY_ATTR(fcc_stepper_enable),
+ POWER_SUPPLY_ATTR(toggle_stat),
+ POWER_SUPPLY_ATTR(main_fcc_max),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
POWER_SUPPLY_ATTR(model_name),
POWER_SUPPLY_ATTR(manufacturer),
POWER_SUPPLY_ATTR(serial_number),
+ POWER_SUPPLY_ATTR(battery_type),
+ POWER_SUPPLY_ATTR(cycle_counts),
};
static struct attribute *
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 6e81fcc..14cd691 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -365,6 +365,16 @@
for hang. By using sysfs entries core hang detection can be
enabled or disabled dynamically.
+config QCOM_FSA4480_I2C
+ bool "Fairchild FSA4480 chip with I2C"
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Support for the Fairchild FSA4480 IC switch chip controlled
+ using I2C. This driver provides common support
+ for accessing the device, switching between USB and Audio
+ modes, changing orientation.
+
config QCOM_WATCHDOG_V2
bool "QTI Watchdog Support"
depends on ARCH_QCOM
@@ -373,4 +383,49 @@
watchdog times out. It allows for detection of cpu hangs and
deadlocks. It does not run during the bootup process, so it will
not catch any early lockups.
+
+config QCOM_RPMH
+ bool "Qualcomm Technologies, Inc. RPM-Hardened (RPMH) Communication"
+ depends on ARCH_QCOM && ARM64 && OF || COMPILE_TEST
+ help
+ Support for communication with the hardened-RPM blocks in
+ Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an
+ internal bus to transmit state requests for shared resources. A set
+ of hardware components aggregate requests for these resources and
+ help apply the aggregated state on the resource.
+
+config QCOM_BUS_SCALING
+ bool "Bus scaling driver"
+ help
+ This option enables bus scaling on MSM devices. Bus scaling
+ allows devices to request the clocks be set to rates sufficient
+ for the active devices needs without keeping the clocks at max
+ frequency when a slower speed is sufficient.
+
+config QCOM_BUS_CONFIG_RPMH
+ bool "RPMH Bus scaling driver"
+ depends on QCOM_BUS_SCALING
+ help
+ This option enables bus scaling using QCOM specific hardware
+ accelerators. It enables the translation of bandwidth requests
+ from logical nodes to hardware nodes controlled by the BCM (Bus
+ Clock Manager)
+
+config QCOM_GLINK
+ tristate "GLINK Probe Helper"
+ depends on RPMSG_QCOM_GLINK_SMEM
+ help
+ This enables the GLINK Probe module. The GLINK RPMSG Plugin is
+ currently designed to plugin with the remote proc framework as a
+ subdev. This module is responsible for creating the glink transports
+ when remote proc is disabled.
+
+config QCOM_GLINK_PKT
+ tristate "Enable device interface for GLINK packet channels"
+ depends on QCOM_GLINK
+ help
+ G-link packet driver provides the interface for the userspace
+ clients to communicate over G-Link via device nodes.
+	  This enables the userspace clients to read and write to
+	  glink packet channels.
endmenu
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 7464fd4..addf21d 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -46,3 +46,8 @@
obj-$(CONFIG_SOC_BUS) += socinfo.o
obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o
obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
+CFLAGS_rpmh-rsc.o := -I$(src)
+obj-$(CONFIG_QCOM_BUS_SCALING) += msm_bus/
+obj-$(CONFIG_QCOM_FSA4480_I2C) += fsa4480-i2c.o
+obj-$(CONFIG_QCOM_GLINK) += glink_probe.o
+obj-$(CONFIG_QCOM_GLINK_PKT) += glink_pkt.o
diff --git a/drivers/soc/qcom/fsa4480-i2c.c b/drivers/soc/qcom/fsa4480-i2c.c
new file mode 100644
index 0000000..e710c06
--- /dev/null
+++ b/drivers/soc/qcom/fsa4480-i2c.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/soc/qcom/fsa4480-i2c.h>
+
+#define FSA4480_I2C_NAME "fsa4480-driver"
+
+#define FSA4480_SWITCH_SETTINGS 0x04
+#define FSA4480_SWITCH_CONTROL 0x05
+#define FSA4480_SWITCH_STATUS1 0x07
+#define FSA4480_SLOW_L 0x08
+#define FSA4480_SLOW_R 0x09
+#define FSA4480_SLOW_MIC 0x0A
+#define FSA4480_SLOW_SENSE 0x0B
+#define FSA4480_SLOW_GND 0x0C
+#define FSA4480_DELAY_L_R 0x0D
+#define FSA4480_DELAY_L_MIC 0x0E
+#define FSA4480_DELAY_L_SENSE 0x0F
+#define FSA4480_DELAY_L_AGND 0x10
+#define FSA4480_RESET 0x1E
+
+struct fsa4480_priv {
+ struct regmap *regmap;
+ struct device *dev;
+ struct power_supply *usb_psy;
+ struct notifier_block psy_nb;
+ atomic_t usbc_mode;
+ struct work_struct usbc_analog_work;
+ struct blocking_notifier_head fsa4480_notifier;
+};
+
+struct fsa4480_reg_val {
+ u16 reg;
+ u8 val;
+};
+
+static const struct regmap_config fsa4480_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = FSA4480_RESET,
+};
+
+static const struct fsa4480_reg_val fsa_reg_i2c_defaults[] = {
+ {FSA4480_SLOW_L, 0x00},
+ {FSA4480_SLOW_R, 0x00},
+ {FSA4480_SLOW_MIC, 0x00},
+ {FSA4480_SLOW_SENSE, 0x00},
+ {FSA4480_SLOW_GND, 0x00},
+ {FSA4480_DELAY_L_R, 0x00},
+ {FSA4480_DELAY_L_MIC, 0x00},
+ {FSA4480_DELAY_L_SENSE, 0x00},
+ {FSA4480_DELAY_L_AGND, 0x09},
+ {FSA4480_SWITCH_SETTINGS, 0x98},
+};
+
+static void fsa4480_usbc_update_settings(struct fsa4480_priv *fsa_priv,
+ u32 switch_control, u32 switch_enable)
+{
+ if (!fsa_priv->regmap) {
+ dev_err(fsa_priv->dev, "%s: regmap invalid\n", __func__);
+ return;
+ }
+
+ regmap_write(fsa_priv->regmap, FSA4480_SWITCH_SETTINGS, 0x80);
+ regmap_write(fsa_priv->regmap, FSA4480_SWITCH_CONTROL, switch_control);
+ /* FSA4480 chip hardware requirement */
+ usleep_range(50, 55);
+ regmap_write(fsa_priv->regmap, FSA4480_SWITCH_SETTINGS, switch_enable);
+}
+
+static int fsa4480_usbc_event_changed(struct notifier_block *nb,
+ unsigned long evt, void *ptr)
+{
+ int ret;
+ union power_supply_propval mode;
+ struct fsa4480_priv *fsa_priv =
+ container_of(nb, struct fsa4480_priv, psy_nb);
+ struct device *dev;
+
+ if (!fsa_priv)
+ return -EINVAL;
+
+ dev = fsa_priv->dev;
+ if (!dev)
+ return -EINVAL;
+
+ if ((struct power_supply *)ptr != fsa_priv->usb_psy ||
+ evt != PSY_EVENT_PROP_CHANGED)
+ return 0;
+
+ ret = power_supply_get_property(fsa_priv->usb_psy,
+ POWER_SUPPLY_PROP_TYPEC_MODE, &mode);
+ if (ret) {
+ dev_err(dev, "%s: Unable to read USB TYPEC_MODE: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ dev_dbg(dev, "%s: USB change event received, supply mode %d, usbc mode %d, expected %d\n",
+ __func__, mode.intval, fsa_priv->usbc_mode.counter,
+ POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER);
+
+ switch (mode.intval) {
+ case POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER:
+ case POWER_SUPPLY_TYPEC_NONE:
+ if (atomic_read(&(fsa_priv->usbc_mode)) == mode.intval)
+ break; /* filter notifications received before */
+ atomic_set(&(fsa_priv->usbc_mode), mode.intval);
+
+ dev_dbg(dev, "%s: queueing usbc_analog_work\n",
+ __func__);
+ schedule_work(&fsa_priv->usbc_analog_work);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/*
+ * fsa4480_reg_notifier - register notifier block with fsa driver
+ *
+ * @nb - notifier block of fsa4480
+ * @node - phandle node to fsa4480 device
+ *
+ * Returns 0 on success, or error code
+ */
+int fsa4480_reg_notifier(struct notifier_block *nb,
+ struct device_node *node)
+{
+ int rc = 0;
+ struct i2c_client *client = of_find_i2c_device_by_node(node);
+ struct fsa4480_priv *fsa_priv;
+
+ if (!client)
+ return -EINVAL;
+
+ fsa_priv = (struct fsa4480_priv *)i2c_get_clientdata(client);
+ if (!fsa_priv)
+ return -EINVAL;
+
+ rc = blocking_notifier_chain_register
+ (&fsa_priv->fsa4480_notifier, nb);
+ if (rc)
+ return rc;
+
+ /*
+ * as part of the init sequence check if there is a connected
+ * USB C analog adapter
+ */
+ dev_dbg(fsa_priv->dev, "%s: verify if USB adapter is already inserted\n",
+ __func__);
+ rc = fsa4480_usbc_event_changed(&fsa_priv->psy_nb,
+ PSY_EVENT_PROP_CHANGED,
+ fsa_priv->usb_psy);
+
+ return rc;
+}
+EXPORT_SYMBOL(fsa4480_reg_notifier);
+
+/*
+ * fsa4480_unreg_notifier - unregister notifier block with fsa driver
+ *
+ * @nb - notifier block of fsa4480
+ * @node - phandle node to fsa4480 device
+ *
+ * Returns 0 on pass, or error code
+ */
+int fsa4480_unreg_notifier(struct notifier_block *nb,
+ struct device_node *node)
+{
+ struct i2c_client *client = of_find_i2c_device_by_node(node);
+ struct fsa4480_priv *fsa_priv;
+
+ if (!client)
+ return -EINVAL;
+
+ fsa_priv = (struct fsa4480_priv *)i2c_get_clientdata(client);
+ if (!fsa_priv)
+ return -EINVAL;
+
+ atomic_set(&(fsa_priv->usbc_mode), 0);
+ fsa4480_usbc_update_settings(fsa_priv, 0x18, 0x98);
+ return blocking_notifier_chain_unregister
+ (&fsa_priv->fsa4480_notifier, nb);
+}
+EXPORT_SYMBOL(fsa4480_unreg_notifier);
+
+static int fsa4480_validate_display_port_settings(struct fsa4480_priv *fsa_priv)
+{
+ u32 switch_status = 0;
+
+ regmap_read(fsa_priv->regmap, FSA4480_SWITCH_STATUS1, &switch_status);
+
+ if ((switch_status != 0x23) && (switch_status != 0x1C)) {
+ pr_err("AUX SBU1/2 switch status is invalid = %u\n",
+ switch_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+/*
+ * fsa4480_switch_event - configure FSA switch position based on event
+ *
+ * @node - phandle node to fsa4480 device
+ * @event - fsa_function enum
+ *
+ * Returns int on whether the switch happened or not
+ */
+int fsa4480_switch_event(struct device_node *node,
+ enum fsa_function event)
+{
+	u32 switch_control = 0;
+ struct i2c_client *client = of_find_i2c_device_by_node(node);
+ struct fsa4480_priv *fsa_priv;
+
+ if (!client)
+ return -EINVAL;
+
+ fsa_priv = (struct fsa4480_priv *)i2c_get_clientdata(client);
+ if (!fsa_priv)
+ return -EINVAL;
+ if (!fsa_priv->regmap)
+ return -EINVAL;
+
+ switch (event) {
+ case FSA_MIC_GND_SWAP:
+ regmap_read(fsa_priv->regmap, FSA4480_SWITCH_CONTROL,
+ &switch_control);
+ if ((switch_control & 0x07) == 0x07)
+ switch_control = 0x0;
+ else
+ switch_control = 0x7;
+ fsa4480_usbc_update_settings(fsa_priv, switch_control, 0x9F);
+ break;
+ case FSA_USBC_ORIENTATION_CC1:
+ fsa4480_usbc_update_settings(fsa_priv, 0x00, 0xE0);
+ return fsa4480_validate_display_port_settings(fsa_priv);
+ case FSA_USBC_ORIENTATION_CC2:
+ fsa4480_usbc_update_settings(fsa_priv, 0x60, 0xE0);
+ return fsa4480_validate_display_port_settings(fsa_priv);
+ case FSA_USBC_DISPLAYPORT_DISCONNECTED:
+ fsa4480_usbc_update_settings(fsa_priv, 0x18, 0x98);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(fsa4480_switch_event);
+
+static int fsa4480_usbc_analog_setup_switches
+ (struct fsa4480_priv *fsa_priv, bool active)
+{
+ dev_dbg(fsa_priv->dev, "%s: setting GPIOs active = %d\n",
+ __func__, active);
+
+ if (active) {
+ /* activate switches */
+ fsa4480_usbc_update_settings(fsa_priv, 0x00, 0x9F);
+
+ /* notify call chain on event */
+ blocking_notifier_call_chain(&fsa_priv->fsa4480_notifier,
+ POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER, NULL);
+ } else {
+ /* notify call chain on event */
+ blocking_notifier_call_chain(&fsa_priv->fsa4480_notifier,
+ POWER_SUPPLY_TYPEC_NONE, NULL);
+
+ /* deactivate switches */
+ fsa4480_usbc_update_settings(fsa_priv, 0x18, 0x98);
+ }
+
+ return 0;
+}
+
+static void fsa4480_usbc_analog_work_fn(struct work_struct *work)
+{
+ struct fsa4480_priv *fsa_priv =
+ container_of(work, struct fsa4480_priv, usbc_analog_work);
+
+ if (!fsa_priv) {
+ pr_err("%s: fsa container invalid\n", __func__);
+ return;
+ }
+ fsa4480_usbc_analog_setup_switches(fsa_priv,
+ atomic_read(&(fsa_priv->usbc_mode)) != POWER_SUPPLY_TYPEC_NONE);
+}
+
+static void fsa4480_update_reg_defaults(struct regmap *regmap)
+{
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(fsa_reg_i2c_defaults); i++)
+ regmap_write(regmap, fsa_reg_i2c_defaults[i].reg,
+ fsa_reg_i2c_defaults[i].val);
+}
+
+static int fsa4480_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct fsa4480_priv *fsa_priv;
+ int rc = 0;
+
+ fsa_priv = devm_kzalloc(&i2c->dev, sizeof(*fsa_priv),
+ GFP_KERNEL);
+ if (!fsa_priv)
+ return -ENOMEM;
+
+ fsa_priv->dev = &i2c->dev;
+
+ fsa_priv->usb_psy = power_supply_get_by_name("usb");
+ if (!fsa_priv->usb_psy) {
+ rc = -EPROBE_DEFER;
+ dev_dbg(fsa_priv->dev,
+ "%s: could not get USB psy info: %d\n",
+ __func__, rc);
+ goto err_data;
+ }
+
+ fsa_priv->regmap = devm_regmap_init_i2c(i2c, &fsa4480_regmap_config);
+ if (IS_ERR_OR_NULL(fsa_priv->regmap)) {
+ dev_err(fsa_priv->dev, "%s: Failed to initialize regmap: %d\n",
+ __func__, rc);
+ if (!fsa_priv->regmap) {
+ rc = -EINVAL;
+ goto err_supply;
+ }
+ rc = PTR_ERR(fsa_priv->regmap);
+ goto err_supply;
+ }
+
+ fsa4480_update_reg_defaults(fsa_priv->regmap);
+
+ fsa_priv->psy_nb.notifier_call = fsa4480_usbc_event_changed;
+ fsa_priv->psy_nb.priority = 0;
+ rc = power_supply_reg_notifier(&fsa_priv->psy_nb);
+ if (rc) {
+ dev_err(fsa_priv->dev, "%s: power supply reg failed: %d\n",
+ __func__, rc);
+ goto err_supply;
+ }
+
+ i2c_set_clientdata(i2c, fsa_priv);
+
+ INIT_WORK(&fsa_priv->usbc_analog_work,
+ fsa4480_usbc_analog_work_fn);
+
+ fsa_priv->fsa4480_notifier.rwsem =
+ (struct rw_semaphore)__RWSEM_INITIALIZER
+ ((fsa_priv->fsa4480_notifier).rwsem);
+ fsa_priv->fsa4480_notifier.head = NULL;
+
+ return 0;
+
+err_supply:
+ power_supply_put(fsa_priv->usb_psy);
+err_data:
+ devm_kfree(&i2c->dev, fsa_priv);
+ return rc;
+}
+
+static int fsa4480_remove(struct i2c_client *i2c)
+{
+ struct fsa4480_priv *fsa_priv =
+ (struct fsa4480_priv *)i2c_get_clientdata(i2c);
+
+ if (!fsa_priv)
+ return -EINVAL;
+
+ fsa4480_usbc_update_settings(fsa_priv, 0x18, 0x98);
+
+ /* deregister from PMI */
+ power_supply_unreg_notifier(&fsa_priv->psy_nb);
+ power_supply_put(fsa_priv->usb_psy);
+ dev_set_drvdata(&i2c->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id fsa4480_i2c_dt_match[] = {
+ {
+ .compatible = "qcom,fsa4480-i2c",
+ },
+ {}
+};
+
+static struct i2c_driver fsa4480_i2c_driver = {
+ .driver = {
+ .name = FSA4480_I2C_NAME,
+ .of_match_table = fsa4480_i2c_dt_match,
+ },
+ .probe = fsa4480_probe,
+ .remove = fsa4480_remove,
+};
+
+static int __init fsa4480_init(void)
+{
+ int rc;
+
+ rc = i2c_add_driver(&fsa4480_i2c_driver);
+ if (rc)
+ pr_err("fsa4480: Failed to register I2C driver: %d\n", rc);
+
+ return rc;
+}
+module_init(fsa4480_init);
+
+static void __exit fsa4480_exit(void)
+{
+ i2c_del_driver(&fsa4480_i2c_driver);
+}
+module_exit(fsa4480_exit);
+
+MODULE_DESCRIPTION("FSA4480 I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_pkt.c b/drivers/soc/qcom/glink_pkt.c
new file mode 100644
index 0000000..848a1cf
--- /dev/null
+++ b/drivers/soc/qcom/glink_pkt.c
@@ -0,0 +1,891 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/ipc_logging.h>
+#include <linux/refcount.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/rpmsg.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/idr.h>
+#include <linux/of.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/termios.h>
+
+/* Define IPC Logging Macros */
+#define GLINK_PKT_IPC_LOG_PAGE_CNT 2
+static void *glink_pkt_ilctxt;
+
+static int glink_pkt_debug_mask;
+module_param_named(debug_mask, glink_pkt_debug_mask, int, 0664);
+
+enum {
+ GLINK_PKT_INFO = 1U << 0,
+};
+
+#define GLINK_PKT_INFO(x, ...) \
+do { \
+ if (glink_pkt_debug_mask & GLINK_PKT_INFO) { \
+ ipc_log_string(glink_pkt_ilctxt, \
+ "[%s]: "x, __func__, ##__VA_ARGS__); \
+ } \
+} while (0)
+
+#define GLINK_PKT_ERR(x, ...) \
+do { \
+ pr_err_ratelimited("[%s]: "x, __func__, ##__VA_ARGS__); \
+ ipc_log_string(glink_pkt_ilctxt, "[%s]: "x, __func__, ##__VA_ARGS__); \
+} while (0)
+
+#define SMD_DTR_SIG BIT(31)
+#define SMD_CTS_SIG BIT(30)
+#define SMD_CD_SIG BIT(29)
+#define SMD_RI_SIG BIT(28)
+
+#define to_smd_signal(sigs) \
+do { \
+ sigs &= 0x0fff; \
+ if (sigs & TIOCM_DTR) \
+ sigs |= SMD_DTR_SIG; \
+ if (sigs & TIOCM_RTS) \
+ sigs |= SMD_CTS_SIG; \
+ if (sigs & TIOCM_CD) \
+ sigs |= SMD_CD_SIG; \
+ if (sigs & TIOCM_RI) \
+ sigs |= SMD_RI_SIG; \
+} while (0)
+
+#define from_smd_signal(sigs) \
+do { \
+ if (sigs & SMD_DTR_SIG) \
+ sigs |= TIOCM_DSR; \
+ if (sigs & SMD_CTS_SIG) \
+ sigs |= TIOCM_CTS; \
+ if (sigs & SMD_CD_SIG) \
+ sigs |= TIOCM_CD; \
+ if (sigs & SMD_RI_SIG) \
+ sigs |= TIOCM_RI; \
+ sigs &= 0x0fff; \
+} while (0)
+
+#define GLINK_PKT_IOCTL_MAGIC (0xC3)
+
+#define GLINK_PKT_IOCTL_QUEUE_RX_INTENT \
+ _IOW(GLINK_PKT_IOCTL_MAGIC, 0, unsigned int)
+
+#define MODULE_NAME "glink_pkt"
+static dev_t glink_pkt_major;
+static struct class *glink_pkt_class;
+static int num_glink_pkt_devs;
+
+static DEFINE_IDA(glink_pkt_minor_ida);
+
+/**
+ * struct glink_pkt - driver context, relates rpdev to cdev
+ * @dev: glink pkt device
+ * @cdev: cdev for the glink pkt device
+ * @drv: rpmsg driver for registering to rpmsg bus
+ * @lock: synchronization of @rpdev and @open_tout modifications
+ * @ch_open: wait object for opening the glink channel
+ * @refcount: count how many userspace clients have handles
+ * @rpdev:	underlying rpmsg device
+ * @queue_lock: synchronization of @queue operations
+ * @queue: incoming message queue
+ * @readq: wait object for incoming queue
+ * @sig_change: flag to indicate serial signal change
+ * @dev_name: /dev/@dev_name for glink_pkt device
+ * @ch_name: glink channel to match to
+ * @edge: glink edge to match to
+ * @open_tout: timeout for open syscall, configurable in sysfs
+ */
+struct glink_pkt_device {
+ struct device dev;
+ struct cdev cdev;
+ struct rpmsg_driver drv;
+
+ struct mutex lock;
+ struct completion ch_open;
+ refcount_t refcount;
+ struct rpmsg_device *rpdev;
+
+ spinlock_t queue_lock;
+ struct sk_buff_head queue;
+ wait_queue_head_t readq;
+ int sig_change;
+
+ const char *dev_name;
+ const char *ch_name;
+ const char *edge;
+ int open_tout;
+};
+
+#define dev_to_gpdev(_dev) container_of(_dev, struct glink_pkt_device, dev)
+#define cdev_to_gpdev(_cdev) container_of(_cdev, struct glink_pkt_device, cdev)
+#define drv_to_rpdrv(_drv) container_of(_drv, struct rpmsg_driver, drv)
+#define rpdrv_to_gpdev(_rdrv) container_of(_rdrv, struct glink_pkt_device, drv)
+
+static ssize_t open_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct glink_pkt_device *gpdev = dev_to_gpdev(dev);
+ long tmp;
+
+ mutex_lock(&gpdev->lock);
+ if (kstrtol(buf, 0, &tmp)) {
+ mutex_unlock(&gpdev->lock);
+ GLINK_PKT_ERR("unable to convert:%s to an int for /dev/%s\n",
+ buf, gpdev->dev_name);
+ return -EINVAL;
+ }
+ gpdev->open_tout = tmp;
+ mutex_unlock(&gpdev->lock);
+
+ return n;
+}
+
+static ssize_t open_timeout_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct glink_pkt_device *gpdev = dev_to_gpdev(dev);
+ ssize_t ret;
+
+ mutex_lock(&gpdev->lock);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", gpdev->open_tout);
+ mutex_unlock(&gpdev->lock);
+
+ return ret;
+}
+
+static DEVICE_ATTR_RW(open_timeout);
+
+static int glink_pkt_rpdev_probe(struct rpmsg_device *rpdev)
+{
+ struct device_driver *drv = rpdev->dev.driver;
+ struct rpmsg_driver *rpdrv = drv_to_rpdrv(drv);
+ struct glink_pkt_device *gpdev = rpdrv_to_gpdev(rpdrv);
+
+ mutex_lock(&gpdev->lock);
+ gpdev->rpdev = rpdev;
+ mutex_unlock(&gpdev->lock);
+
+ dev_set_drvdata(&rpdev->dev, gpdev);
+ complete_all(&gpdev->ch_open);
+
+ return 0;
+}
+
+static int glink_pkt_rpdev_cb(struct rpmsg_device *rpdev, void *buf, int len,
+ void *priv, u32 addr)
+{
+ struct glink_pkt_device *gpdev = dev_get_drvdata(&rpdev->dev);
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, buf, len);
+
+ spin_lock_irqsave(&gpdev->queue_lock, flags);
+ skb_queue_tail(&gpdev->queue, skb);
+ spin_unlock_irqrestore(&gpdev->queue_lock, flags);
+
+ /* wake up any blocking processes, waiting for new data */
+ wake_up_interruptible(&gpdev->readq);
+
+ return 0;
+}
+
+static int glink_pkt_rpdev_sigs(struct rpmsg_device *rpdev, u32 old, u32 new)
+{
+ struct device_driver *drv = rpdev->dev.driver;
+ struct rpmsg_driver *rpdrv = drv_to_rpdrv(drv);
+ struct glink_pkt_device *gpdev = rpdrv_to_gpdev(rpdrv);
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpdev->queue_lock, flags);
+ gpdev->sig_change = true;
+ spin_unlock_irqrestore(&gpdev->queue_lock, flags);
+
+ /* wake up any blocking processes, waiting for new data */
+ wake_up_interruptible(&gpdev->readq);
+
+ return 0;
+}
+
+static void glink_pkt_rpdev_remove(struct rpmsg_device *rpdev)
+{
+ struct device_driver *drv = rpdev->dev.driver;
+ struct rpmsg_driver *rpdrv = drv_to_rpdrv(drv);
+ struct glink_pkt_device *gpdev = rpdrv_to_gpdev(rpdrv);
+
+ mutex_lock(&gpdev->lock);
+ gpdev->rpdev = NULL;
+ mutex_unlock(&gpdev->lock);
+
+ dev_set_drvdata(&rpdev->dev, NULL);
+
+ /* wake up any blocked readers */
+ reinit_completion(&gpdev->ch_open);
+ wake_up_interruptible(&gpdev->readq);
+}
+
+/**
+ * glink_pkt_open() - open() syscall for the glink_pkt device
+ * inode: Pointer to the inode structure.
+ * file: Pointer to the file structure.
+ *
+ * This function is used to open the glink pkt device when
+ * userspace client do a open() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+int glink_pkt_open(struct inode *inode, struct file *file)
+{
+ struct glink_pkt_device *gpdev = cdev_to_gpdev(inode->i_cdev);
+ int tout = msecs_to_jiffies(gpdev->open_tout * 1000);
+ struct device *dev = &gpdev->dev;
+ int ret;
+
+ refcount_inc(&gpdev->refcount);
+ get_device(dev);
+
+ GLINK_PKT_INFO("begin for %s by %s:%ld ref_cnt[%d]\n",
+ gpdev->ch_name, current->comm,
+ task_pid_nr(current), refcount_read(&gpdev->refcount));
+
+ ret = wait_for_completion_interruptible_timeout(&gpdev->ch_open, tout);
+ if (ret <= 0) {
+ refcount_dec(&gpdev->refcount);
+ put_device(dev);
+ GLINK_PKT_INFO("timeout for %s by %s:%ld\n", gpdev->ch_name,
+ current->comm, task_pid_nr(current));
+		return (ret < 0) ? ret : -ETIMEDOUT;
+ }
+ file->private_data = gpdev;
+
+ GLINK_PKT_INFO("end for %s by %s:%ld ref_cnt[%d]\n",
+ gpdev->ch_name, current->comm,
+ task_pid_nr(current), refcount_read(&gpdev->refcount));
+
+ return 0;
+}
+
+/**
+ * glink_pkt_release() - release operation on glink_pkt device
+ * inode: Pointer to the inode structure.
+ * file: Pointer to the file structure.
+ *
+ * This function is used to release the glink pkt device when
+ * userspace client do a close() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+int glink_pkt_release(struct inode *inode, struct file *file)
+{
+ struct glink_pkt_device *gpdev = cdev_to_gpdev(inode->i_cdev);
+ struct device *dev = &gpdev->dev;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ GLINK_PKT_INFO("for %s by %s:%ld ref_cnt[%d]\n",
+ gpdev->ch_name, current->comm,
+ task_pid_nr(current), refcount_read(&gpdev->refcount));
+
+ refcount_dec(&gpdev->refcount);
+ if (refcount_read(&gpdev->refcount) == 1) {
+ spin_lock_irqsave(&gpdev->queue_lock, flags);
+
+ /* Discard all SKBs */
+ while (!skb_queue_empty(&gpdev->queue)) {
+ skb = skb_dequeue(&gpdev->queue);
+ kfree_skb(skb);
+ }
+ wake_up_interruptible(&gpdev->readq);
+ gpdev->sig_change = false;
+ spin_unlock_irqrestore(&gpdev->queue_lock, flags);
+ }
+
+ put_device(dev);
+
+ return 0;
+}
+
+/**
+ * glink_pkt_read() - read() syscall for the glink_pkt device
+ * file: Pointer to the file structure.
+ * buf: Pointer to the userspace buffer.
+ * count: Number bytes to read from the file.
+ * ppos: Pointer to the position into the file.
+ *
+ * This function is used to Read the data from glink pkt device when
+ * userspace client do a read() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+ssize_t glink_pkt_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct glink_pkt_device *gpdev = file->private_data;
+ unsigned long flags;
+ struct sk_buff *skb;
+ int use;
+
+ if (!gpdev || refcount_read(&gpdev->refcount) == 1) {
+		GLINK_PKT_ERR("invalid device handle\n");
+ return -EINVAL;
+ }
+
+ if (!completion_done(&gpdev->ch_open)) {
+ GLINK_PKT_ERR("%s channel in reset\n", gpdev->ch_name);
+ return -ENETRESET;
+ }
+
+ GLINK_PKT_INFO("begin for %s by %s:%ld ref_cnt[%d]\n",
+ gpdev->ch_name, current->comm,
+ task_pid_nr(current), refcount_read(&gpdev->refcount));
+
+ spin_lock_irqsave(&gpdev->queue_lock, flags);
+ /* Wait for data in the queue */
+ if (skb_queue_empty(&gpdev->queue)) {
+ spin_unlock_irqrestore(&gpdev->queue_lock, flags);
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ /* Wait until we get data or the endpoint goes away */
+ if (wait_event_interruptible(gpdev->readq,
+ !skb_queue_empty(&gpdev->queue) ||
+ !completion_done(&gpdev->ch_open)))
+ return -ERESTARTSYS;
+
+ /* We lost the endpoint while waiting */
+ if (!completion_done(&gpdev->ch_open))
+ return -ENETRESET;
+
+ spin_lock_irqsave(&gpdev->queue_lock, flags);
+ }
+
+ skb = skb_dequeue(&gpdev->queue);
+ spin_unlock_irqrestore(&gpdev->queue_lock, flags);
+ if (!skb)
+ return -EFAULT;
+
+ use = min_t(size_t, count, skb->len);
+ if (copy_to_user(buf, skb->data, use))
+ use = -EFAULT;
+
+ kfree_skb(skb);
+
+ GLINK_PKT_INFO("end for %s by %s:%ld ret[%d]\n", gpdev->ch_name,
+ current->comm, task_pid_nr(current), use);
+
+ return use;
+}
+
+/**
+ * glink_pkt_write() - write() syscall for the glink_pkt device
+ * file: Pointer to the file structure.
+ * buf: Pointer to the userspace buffer.
+ * count: Number bytes to write to the file.
+ * ppos: Pointer to the position into the file.
+ *
+ * This function is used to write the data to glink pkt device when
+ * userspace client do a write() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+ssize_t glink_pkt_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	struct glink_pkt_device *gpdev = file->private_data;
+	void *kbuf;
+	int ret;
+
+	/* Fix: dropped redundant re-read of file->private_data */
+	if (!gpdev || refcount_read(&gpdev->refcount) == 1) {
+		/* Fix: format string was missing "%s:" for the __func__ arg */
+		GLINK_PKT_ERR("%s: invalid device handle\n", __func__);
+		return -EINVAL;
+	}
+
+	GLINK_PKT_INFO("begin to %s buffer_size %zu\n", gpdev->ch_name, count);
+	kbuf = memdup_user(buf, count);
+	if (IS_ERR(kbuf))
+		return PTR_ERR(kbuf);
+
+	if (mutex_lock_interruptible(&gpdev->lock)) {
+		ret = -ERESTARTSYS;
+		goto free_kbuf;
+	}
+	if (!completion_done(&gpdev->ch_open) || !gpdev->rpdev) {
+		GLINK_PKT_ERR("%s channel in reset\n", gpdev->ch_name);
+		ret = -ENETRESET;
+		goto unlock_ch;
+	}
+
+	if (file->f_flags & O_NONBLOCK)
+		ret = rpmsg_trysend(gpdev->rpdev->ept, kbuf, count);
+	else
+		ret = rpmsg_send(gpdev->rpdev->ept, kbuf, count);
+
+unlock_ch:
+	mutex_unlock(&gpdev->lock);
+
+free_kbuf:
+	kfree(kbuf);
+	GLINK_PKT_INFO("finish to %s ret %d\n", gpdev->ch_name, ret);
+	return ret < 0 ? ret : count;
+}
+
+/**
+ * glink_pkt_poll() - poll() syscall for the glink_pkt device
+ * file: Pointer to the file structure.
+ * wait: pointer to Poll table.
+ *
+ * This function is used to poll on the glink pkt device when
+ * userspace client do a poll() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+static unsigned int glink_pkt_poll(struct file *file, poll_table *wait)
+{
+	struct glink_pkt_device *gpdev = file->private_data;
+	unsigned int mask = 0;
+	unsigned long flags;
+
+	/* Fix: dropped redundant re-read of file->private_data */
+	if (!gpdev || refcount_read(&gpdev->refcount) == 1) {
+		/* Fix: format string was missing "%s:" for the __func__ arg */
+		GLINK_PKT_ERR("%s: invalid device handle\n", __func__);
+		return POLLERR;
+	}
+	if (!completion_done(&gpdev->ch_open)) {
+		GLINK_PKT_ERR("%s channel in reset\n", gpdev->ch_name);
+		return POLLHUP;
+	}
+
+	poll_wait(file, &gpdev->readq, wait);
+
+	mutex_lock(&gpdev->lock);
+
+	/* channel may have gone down while we were registering the wait */
+	if (!completion_done(&gpdev->ch_open) || !gpdev->rpdev) {
+		GLINK_PKT_ERR("%s channel reset after wait\n", gpdev->ch_name);
+		mutex_unlock(&gpdev->lock);
+		return POLLHUP;
+	}
+
+	spin_lock_irqsave(&gpdev->queue_lock, flags);
+	if (!skb_queue_empty(&gpdev->queue))
+		mask |= POLLIN | POLLRDNORM;
+
+	/* POLLPRI signals a remote sideband-signal change (TIOCMGET clears) */
+	if (gpdev->sig_change)
+		mask |= POLLPRI;
+	spin_unlock_irqrestore(&gpdev->queue_lock, flags);
+
+	mask |= rpmsg_poll(gpdev->rpdev->ept, file, wait);
+
+	mutex_unlock(&gpdev->lock);
+
+	return mask;
+}
+
+/**
+ * glink_pkt_tiocmset() - set the signals for glink_pkt device
+ * gpdev: Pointer to the glink_pkt device structure.
+ * cmd: IOCTL command.
+ * arg: Arguments to the ioctl call.
+ *
+ * This function is used to set the signals on the glink pkt device
+ * when userspace client do a ioctl() system call with TIOCMBIS,
+ * TIOCMBIC and TIOCMSET.
+ */
+static int glink_pkt_tiocmset(struct glink_pkt_device *gpdev, unsigned int cmd,
+			      unsigned long arg)
+{
+	u32 lsigs, rsigs, val;
+	int ret;
+
+	/* Fix: annotate the userspace pointer for sparse/__user checking */
+	ret = get_user(val, (u32 __user *)arg);
+	if (ret)
+		return ret;
+
+	/*
+	 * NOTE(review): the result of to_smd_signal() is discarded here;
+	 * confirm it converts @val in place (macro), otherwise this should
+	 * read val = to_smd_signal(val).
+	 */
+	to_smd_signal(val);
+	ret = rpmsg_get_sigs(gpdev->rpdev->ept, &lsigs, &rsigs);
+	if (ret < 0) {
+		GLINK_PKT_ERR("%s: Get signals failed[%d]\n", __func__, ret);
+		return ret;
+	}
+	switch (cmd) {
+	case TIOCMBIS:
+		lsigs |= val;
+		break;
+	case TIOCMBIC:
+		lsigs &= ~val;
+		break;
+	case TIOCMSET:
+		lsigs = val;
+		break;
+	}
+	ret = rpmsg_set_sigs(gpdev->rpdev->ept, lsigs);
+	GLINK_PKT_INFO("sigs[0x%x] ret[%d]\n", lsigs, ret);
+	return ret;
+}
+
+/**
+ * glink_pkt_ioctl() - ioctl() syscall for the glink_pkt device
+ * file: Pointer to the file structure.
+ * cmd: IOCTL command.
+ * arg: Arguments to the ioctl call.
+ *
+ * This function is used to ioctl on the glink pkt device when
+ * userspace client do a ioctl() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+static long glink_pkt_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	struct glink_pkt_device *gpdev;
+	unsigned long flags;
+	u32 lsigs, rsigs;
+	int ret;
+
+	gpdev = file->private_data;
+	if (!gpdev || refcount_read(&gpdev->refcount) == 1) {
+		/* Fix: format string was missing "%s:" for the __func__ arg */
+		GLINK_PKT_ERR("%s: invalid device handle\n", __func__);
+		return -EINVAL;
+	}
+	if (mutex_lock_interruptible(&gpdev->lock))
+		return -ERESTARTSYS;
+
+	if (!completion_done(&gpdev->ch_open)) {
+		GLINK_PKT_ERR("%s channel in reset\n", gpdev->ch_name);
+		mutex_unlock(&gpdev->lock);
+		return -ENETRESET;
+	}
+
+	switch (cmd) {
+	case TIOCMGET:
+		spin_lock_irqsave(&gpdev->queue_lock, flags);
+		gpdev->sig_change = false;
+		spin_unlock_irqrestore(&gpdev->queue_lock, flags);
+
+		ret = rpmsg_get_sigs(gpdev->rpdev->ept, &lsigs, &rsigs);
+		if (!ret) {
+			/*
+			 * Fix: only convert/copy rsigs when the query
+			 * succeeded; rsigs is uninitialized on failure.
+			 * NOTE(review): from_smd_signal()'s result is
+			 * discarded -- confirm it converts in place.
+			 */
+			from_smd_signal(rsigs);
+			ret = put_user(rsigs, (uint32_t __user *)arg);
+		}
+		break;
+	case TIOCMSET:
+	case TIOCMBIS:
+	case TIOCMBIC:
+		ret = glink_pkt_tiocmset(gpdev, cmd, arg);
+		break;
+	case GLINK_PKT_IOCTL_QUEUE_RX_INTENT:
+		/* Return success to not break userspace client logic */
+		ret = 0;
+		break;
+	default:
+		GLINK_PKT_ERR("unrecognized ioctl command 0x%x\n", cmd);
+		ret = -ENOIOCTLCMD;
+	}
+
+	mutex_unlock(&gpdev->lock);
+
+	return ret;
+}
+
+/* Character-device entry points for every glink packet device node */
+static const struct file_operations glink_pkt_fops = {
+	.owner = THIS_MODULE,
+	.open = glink_pkt_open,
+	.release = glink_pkt_release,
+	.read = glink_pkt_read,
+	.write = glink_pkt_write,
+	.poll = glink_pkt_poll,
+	.unlocked_ioctl = glink_pkt_ioctl,
+	/* same handler serves 32-bit userspace; all args are plain u32 */
+	.compat_ioctl = glink_pkt_ioctl,
+};
+
+/* sysfs "name" attribute: report the glink channel name of this device */
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct glink_pkt_device *pktdev = dev_to_gpdev(dev);
+
+	return snprintf(buf, RPMSG_NAME_SIZE, "%s\n", pktdev->ch_name);
+}
+static DEVICE_ATTR_RO(name);
+
+/* Attribute group exposed on each device (wired up via dev->groups) */
+static struct attribute *glink_pkt_device_attrs[] = {
+	&dev_attr_name.attr,
+	NULL
+};
+ATTRIBUTE_GROUPS(glink_pkt_device);
+
+/**
+ * glink_pkt_parse_devicetree() - parse device tree binding for a subnode
+ *
+ * np: pointer to a device tree node
+ * gpdev: pointer to GLINK PACKET device
+ *
+ * Reads the edge, channel-name and device-name strings that describe one
+ * glink packet device.  All three properties are mandatory.
+ *
+ * Return: 0 on success, standard Linux error codes on error.
+ */
+static int glink_pkt_parse_devicetree(struct device_node *np,
+				      struct glink_pkt_device *gpdev)
+{
+	const char *prop;
+	int ret;
+
+	prop = "qcom,glinkpkt-edge";
+	ret = of_property_read_string(np, prop, &gpdev->edge);
+	if (ret < 0)
+		goto missing;
+
+	prop = "qcom,glinkpkt-ch-name";
+	ret = of_property_read_string(np, prop, &gpdev->ch_name);
+	if (ret < 0)
+		goto missing;
+
+	prop = "qcom,glinkpkt-dev-name";
+	ret = of_property_read_string(np, prop, &gpdev->dev_name);
+	if (ret < 0)
+		goto missing;
+
+	GLINK_PKT_INFO("Parsed %s:%s /dev/%s\n", gpdev->edge, gpdev->ch_name,
+		       gpdev->dev_name);
+	return 0;
+
+missing:
+	GLINK_PKT_ERR("%s: missing key: %s\n", __func__, prop);
+	return ret;
+}
+
+/*
+ * Device-model release callback, run when the last reference to the
+ * struct device is dropped: returns the minor number, removes the cdev
+ * and frees the containing glink_pkt_device.
+ * NOTE(review): gpdev is allocated with devm_kzalloc() in
+ * glink_pkt_create_device(); kfree() here would double-free once devres
+ * cleanup runs -- confirm the allocation should be plain kzalloc().
+ */
+static void glink_pkt_release_device(struct device *dev)
+{
+	struct glink_pkt_device *gpdev = dev_to_gpdev(dev);
+
+	ida_simple_remove(&glink_pkt_minor_ida, MINOR(gpdev->dev.devt));
+	cdev_del(&gpdev->cdev);
+	kfree(gpdev);
+}
+
+/*
+ * Build and register the per-device rpmsg driver that binds the glink
+ * channel (matched by ch_name) to this character device.
+ * Return: 0 on success, standard Linux error codes on error.
+ */
+static int glink_pkt_init_rpmsg(struct glink_pkt_device *gpdev)
+{
+	struct rpmsg_driver *rpdrv = &gpdev->drv;
+	struct device *dev = &gpdev->dev;
+	struct rpmsg_device_id *match;
+	char *drv_name;
+	int ret;
+
+	/* zalloc array of two to NULL terminate the match list */
+	match = devm_kzalloc(dev, 2 * sizeof(*match), GFP_KERNEL);
+	if (!match)
+		return -ENOMEM;
+	snprintf(match->name, RPMSG_NAME_SIZE, "%s", gpdev->ch_name);
+
+	drv_name = devm_kasprintf(dev, GFP_KERNEL,
+				  "%s_%s", "glink_pkt", gpdev->dev_name);
+	if (!drv_name)
+		return -ENOMEM;
+
+	rpdrv->probe = glink_pkt_rpdev_probe;
+	rpdrv->remove = glink_pkt_rpdev_remove;
+	rpdrv->callback = glink_pkt_rpdev_cb;
+	rpdrv->signals = glink_pkt_rpdev_sigs;
+	rpdrv->id_table = match;
+	rpdrv->drv.name = drv_name;
+
+	/* Fix: propagate registration failure instead of discarding it */
+	ret = register_rpmsg_driver(rpdrv);
+	if (ret)
+		GLINK_PKT_ERR("rpmsg driver register failed for %s ret:%d\n",
+			      gpdev->dev_name, ret);
+	return ret;
+}
+
+/**
+ * glink_pkt_create_device() - Create glink packet device and add cdev
+ * parent: pointer to the parent device of this glink packet device
+ * np: pointer to device node this glink packet device represents
+ *
+ * return: 0 for success, Standard Linux errors
+ */
+static int glink_pkt_create_device(struct device *parent,
+				   struct device_node *np)
+{
+	struct glink_pkt_device *gpdev;
+	struct device *dev;
+	int ret;
+
+	/*
+	 * Fix: plain kzalloc, not devm_kzalloc.  The release callback
+	 * (glink_pkt_release_device) and the error paths below kfree()
+	 * this memory, which must not also be devres-managed (double free
+	 * when the parent is unbound).
+	 */
+	gpdev = kzalloc(sizeof(*gpdev), GFP_KERNEL);
+	if (!gpdev)
+		return -ENOMEM;
+	ret = glink_pkt_parse_devicetree(np, gpdev);
+	if (ret < 0) {
+		GLINK_PKT_ERR("failed to parse dt ret:%d\n", ret);
+		goto free_gpdev;
+	}
+
+	dev = &gpdev->dev;
+	mutex_init(&gpdev->lock);
+	refcount_set(&gpdev->refcount, 1);
+	init_completion(&gpdev->ch_open);
+
+	/* Default open timeout for open is 120 sec */
+	gpdev->open_tout = 120;
+	gpdev->sig_change = false;
+
+	spin_lock_init(&gpdev->queue_lock);
+	skb_queue_head_init(&gpdev->queue);
+	init_waitqueue_head(&gpdev->readq);
+
+	device_initialize(dev);
+	dev->class = glink_pkt_class;
+	dev->parent = parent;
+	dev->groups = glink_pkt_device_groups;
+	dev_set_drvdata(dev, gpdev);
+
+	cdev_init(&gpdev->cdev, &glink_pkt_fops);
+	gpdev->cdev.owner = THIS_MODULE;
+
+	ret = ida_simple_get(&glink_pkt_minor_ida, 0, num_glink_pkt_devs,
+			     GFP_KERNEL);
+	if (ret < 0)
+		goto free_dev;
+
+	dev->devt = MKDEV(MAJOR(glink_pkt_major), ret);
+	/*
+	 * NOTE(review): dev_name comes from DT and is used as a printf
+	 * format here (minor number as its argument); confirm DT values
+	 * are trusted and contain the expected conversion specifier.
+	 */
+	dev_set_name(dev, gpdev->dev_name, ret);
+
+	ret = cdev_add(&gpdev->cdev, dev->devt, 1);
+	if (ret) {
+		GLINK_PKT_ERR("cdev_add failed for %s ret:%d\n",
+			      gpdev->dev_name, ret);
+		goto free_minor_ida;
+	}
+
+	dev->release = glink_pkt_release_device;
+	ret = device_add(dev);
+	if (ret) {
+		GLINK_PKT_ERR("device_create failed for %s ret:%d\n",
+			      gpdev->dev_name, ret);
+		/*
+		 * Fix: release() is set, so put_device() now owns the
+		 * ida/cdev/gpdev cleanup; the old goto double-freed them.
+		 */
+		put_device(dev);
+		return ret;
+	}
+
+	if (device_create_file(dev, &dev_attr_open_timeout))
+		GLINK_PKT_ERR("device_create_file failed for %s\n",
+			      gpdev->dev_name);
+
+	ret = glink_pkt_init_rpmsg(gpdev);
+	if (ret) {
+		/* Fix: propagate the error (was returning stale 0) and
+		 * tear the live device down through its release callback.
+		 */
+		device_del(dev);
+		put_device(dev);
+		return ret;
+	}
+
+	return 0;
+
+free_minor_ida:
+	ida_simple_remove(&glink_pkt_minor_ida, MINOR(dev->devt));
+free_dev:
+	put_device(dev);
+free_gpdev:
+	kfree(gpdev);
+
+	return ret;
+}
+
+/**
+ * glink_pkt_deinit() - De-initialize this module
+ *
+ * This function frees all the memory and unregisters the char device region.
+ * Safe to call after a failed class_create(): class_destroy() tolerates
+ * NULL/ERR pointers.
+ */
+static void glink_pkt_deinit(void)
+{
+	class_destroy(glink_pkt_class);
+	unregister_chrdev_region(MAJOR(glink_pkt_major), num_glink_pkt_devs);
+}
+
+/**
+ * glink_pkt_probe() - Probe a GLINK packet device
+ *
+ * pdev: Pointer to platform device.
+ *
+ * return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to a G-Link packet device.
+ */
+static int glink_pkt_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *cn;
+	int ret;
+
+	num_glink_pkt_devs = of_get_child_count(dev->of_node);
+	ret = alloc_chrdev_region(&glink_pkt_major, 0, num_glink_pkt_devs,
+				  "glinkpkt");
+	if (ret < 0) {
+		GLINK_PKT_ERR("alloc_chrdev_region failed ret:%d\n", ret);
+		return ret;
+	}
+	glink_pkt_class = class_create(THIS_MODULE, "glinkpkt");
+	if (IS_ERR(glink_pkt_class)) {
+		/* Fix: propagate the error (ret was left 0 here) and
+		 * print the long PTR_ERR value with %ld, not %d.
+		 */
+		ret = PTR_ERR(glink_pkt_class);
+		GLINK_PKT_ERR("class_create failed ret:%ld\n",
+			      PTR_ERR(glink_pkt_class));
+		goto error_deinit;
+	}
+
+	/* per-child failures are logged inside glink_pkt_create_device() */
+	for_each_child_of_node(dev->of_node, cn)
+		glink_pkt_create_device(dev, cn);
+
+	GLINK_PKT_INFO("G-Link Packet Port Driver Initialized\n");
+	return 0;
+
+error_deinit:
+	glink_pkt_deinit();
+	return ret;
+}
+
+/* Bind this platform driver to the "qcom,glinkpkt" DT node */
+static const struct of_device_id glink_pkt_match_table[] = {
+	{ .compatible = "qcom,glinkpkt" },
+	{},
+};
+
+static struct platform_driver glink_pkt_driver = {
+	.probe = glink_pkt_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = glink_pkt_match_table,
+	},
+};
+
+/**
+ * glink_pkt_init() - Initialization function for this module
+ *
+ * returns: 0 on success, standard Linux error code otherwise.
+ */
+static int __init glink_pkt_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&glink_pkt_driver);
+	if (ret) {
+		/* Fix: __func__ argument was missing for the %s specifier */
+		GLINK_PKT_ERR("%s: glink_pkt register failed %d\n",
+			      __func__, ret);
+		return ret;
+	}
+	/* log context is best-effort; logging macros tolerate NULL */
+	glink_pkt_ilctxt = ipc_log_context_create(GLINK_PKT_IPC_LOG_PAGE_CNT,
+						  "glink_pkt", 0);
+	return 0;
+}
+
+/**
+ * glink_pkt_exit() - Exit function for this module
+ *
+ * This function is used to cleanup the module during the exit.
+ */
+static void __exit glink_pkt_exit(void)
+{
+	/* Fix: unregister the platform driver on module unload */
+	platform_driver_unregister(&glink_pkt_driver);
+	glink_pkt_deinit();
+}
+
+module_init(glink_pkt_init);
+module_exit(glink_pkt_exit);
+
+MODULE_DESCRIPTION("MSM G-Link Packet Port");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_probe.c b/drivers/soc/qcom/glink_probe.c
new file mode 100644
index 0000000..1b4d675
--- /dev/null
+++ b/drivers/soc/qcom/glink_probe.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <linux/rpmsg/qcom_glink.h>
+#include <linux/rpmsg.h>
+#include <linux/ipc_logging.h>
+
+#define GLINK_PROBE_LOG_PAGE_CNT 4
+static void *glink_ilc;
+
+#define GLINK_INFO(x, ...) \
+do { \
+ if (glink_ilc) \
+ ipc_log_string(glink_ilc, "[%s]: "x, __func__, ##__VA_ARGS__); \
+} while (0)
+
+#define GLINK_ERR(dev, x, ...) \
+do { \
+ dev_err(dev, "[%s]: "x, __func__, ##__VA_ARGS__); \
+ if (glink_ilc) \
+ ipc_log_string(glink_ilc, "[%s]: "x, __func__, ##__VA_ARGS__); \
+} while (0)
+
+#define GLINK_SSR_DO_CLEANUP 0
+#define GLINK_SSR_CLEANUP_DONE 1
+#define GLINK_SSR_PRIORITY 1
+#define GLINK_SSR_REPLY_TIMEOUT HZ
+
+/* "do cleanup" request sent to the remote glink_ssr service */
+struct do_cleanup_msg {
+	__le32 version;
+	__le32 command;
+	__le32 seq_num;
+	__le32 name_len;
+	char name[32];
+};
+
+/* "cleanup done" reply received from the remote glink_ssr service */
+struct cleanup_done_msg {
+	__le32 version;
+	__le32 response;
+	__le32 seq_num;
+};
+
+/* One SSR notifier registration on behalf of a glink_ssr endpoint */
+struct glink_ssr_nb {
+	struct list_head list;
+	struct glink_ssr *ssr;
+	void *ssr_register_handle;
+
+	const char *glink_label;
+	const char *ssr_label;
+
+	struct notifier_block nb;
+};
+
+/* Per-rpmsg-endpoint state for the glink_ssr protocol */
+struct glink_ssr {
+	struct device *dev;
+	struct rpmsg_endpoint *ept;
+
+	struct list_head notify_list;
+
+	/* sequence number echoed back in cleanup_done_msg */
+	u32 seq_num;
+	struct completion completion;
+};
+
+/* One glink transport edge discovered from the "qcom,glink" DT node */
+struct edge_info {
+	struct list_head list;
+	struct device *dev;
+	struct device_node *node;
+
+	const char *glink_label;
+	const char *ssr_label;
+	void *glink;
+
+	int (*register_fn)(struct edge_info *einfo);
+	void (*unregister_fn)(struct edge_info *einfo);
+	struct notifier_block nb;
+};
+/* NOTE(review): not static -- confirm this symbol need not be exported */
+LIST_HEAD(edge_infos);
+
+/*
+ * SSR notifier callback: after a remote subsystem shuts down, tell the
+ * glink_ssr peer to clean up channels for the affected edge and wait
+ * (bounded) for its acknowledgement.
+ */
+static int glink_ssr_ssr_cb(struct notifier_block *this,
+			    unsigned long code, void *data)
+{
+	struct glink_ssr_nb *nb = container_of(this, struct glink_ssr_nb, nb);
+	struct glink_ssr *ssr = nb->ssr;
+	struct device *dev = ssr->dev;
+	struct do_cleanup_msg msg;
+	int ret;
+
+	if (code != SUBSYS_AFTER_SHUTDOWN)
+		return NOTIFY_DONE;
+
+	ssr->seq_num++;
+	reinit_completion(&ssr->completion);
+
+	/* version stays 0, matching the check in glink_ssr_callback() */
+	memset(&msg, 0, sizeof(msg));
+	msg.command = cpu_to_le32(GLINK_SSR_DO_CLEANUP);
+	msg.seq_num = cpu_to_le32(ssr->seq_num);
+	msg.name_len = cpu_to_le32(strlen(nb->glink_label));
+	strlcpy(msg.name, nb->glink_label, sizeof(msg.name));
+
+	GLINK_INFO("%s: notify of %s seq_num:%d\n",
+		   dev->parent->of_node->name, nb->glink_label,
+		   ssr->seq_num);
+
+	ret = rpmsg_send(ssr->ept, &msg, sizeof(msg));
+	if (ret) {
+		GLINK_ERR(dev, "fail to send do cleanup to %s %d\n",
+			  nb->ssr_label, ret);
+		return NOTIFY_DONE;
+	}
+
+	/* Fix: use the named timeout constant instead of bare HZ */
+	ret = wait_for_completion_timeout(&ssr->completion,
+					  GLINK_SSR_REPLY_TIMEOUT);
+	if (!ret)
+		GLINK_ERR(dev, "timeout waiting for cleanup resp\n");
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * rpmsg receive callback for the glink_ssr channel: validate a
+ * cleanup_done_msg and complete the waiter in glink_ssr_ssr_cb().
+ */
+static int glink_ssr_callback(struct rpmsg_device *rpdev,
+			      void *data, int len, void *priv, u32 addr)
+{
+	struct cleanup_done_msg *msg = data;
+	struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
+
+	if (len < sizeof(*msg)) {
+		GLINK_ERR(ssr->dev, "message too short\n");
+		return -EINVAL;
+	}
+
+	if (le32_to_cpu(msg->version) != 0) {
+		GLINK_ERR(ssr->dev, "invalid version\n");
+		return -EINVAL;
+	}
+
+	if (le32_to_cpu(msg->response) != GLINK_SSR_CLEANUP_DONE)
+		return 0;
+
+	if (le32_to_cpu(msg->seq_num) != ssr->seq_num) {
+		/* Fix: log the CPU-endian value, not the raw __le32 */
+		GLINK_ERR(ssr->dev, "invalid response sequence number %d\n",
+			  le32_to_cpu(msg->seq_num));
+		return -EINVAL;
+	}
+
+	complete(&ssr->completion);
+
+	GLINK_INFO("%s: received seq_num:%d\n", ssr->dev->parent->of_node->name,
+		   le32_to_cpu(msg->seq_num));
+
+	return 0;
+}
+
+/*
+ * Walk the "qcom,notify-edges" phandles and register one SSR notifier
+ * per referenced edge.  Failures for individual edges are logged and
+ * skipped; the loop continues with the next phandle.
+ *
+ * NOTE(review): node references for processed edges are intentionally
+ * retained -- nb->ssr_label / nb->glink_label point into the node's
+ * property data.
+ */
+static void glink_ssr_init_notify(struct glink_ssr *ssr)
+{
+	struct device *dev = ssr->dev;
+	struct device_node *node;
+	struct glink_ssr_nb *nb;
+	void *handle;
+	int ret;
+	int i = 0;
+
+	while (1) {
+		node = of_parse_phandle(dev->of_node, "qcom,notify-edges", i++);
+		if (!node)
+			break;
+
+		nb = devm_kzalloc(dev, sizeof(*nb), GFP_KERNEL);
+		if (!nb) {
+			/* Fix: drop the node reference taken above */
+			of_node_put(node);
+			return;
+		}
+
+		ret = of_property_read_string(node, "label", &nb->ssr_label);
+		if (ret < 0)
+			nb->ssr_label = node->name;
+
+		ret = of_property_read_string(node, "qcom,glink-label",
+					      &nb->glink_label);
+		if (ret < 0) {
+			GLINK_ERR(dev, "no qcom,glink-label for %s\n",
+				  nb->ssr_label);
+			continue;
+		}
+
+		nb->nb.notifier_call = glink_ssr_ssr_cb;
+		nb->nb.priority = GLINK_SSR_PRIORITY;
+
+		handle = subsys_notif_register_notifier(nb->ssr_label, &nb->nb);
+		if (IS_ERR_OR_NULL(handle)) {
+			GLINK_ERR(dev, "register fail for %s SSR notifier\n",
+				  nb->ssr_label);
+			continue;
+		}
+
+		nb->ssr = ssr;
+		nb->ssr_register_handle = handle;
+		list_add_tail(&nb->list, &ssr->notify_list);
+	}
+}
+
+/* rpmsg probe: allocate per-endpoint SSR state and hook up notifiers */
+static int glink_ssr_probe(struct rpmsg_device *rpdev)
+{
+	struct device *dev = &rpdev->dev;
+	struct glink_ssr *ssr;
+
+	ssr = devm_kzalloc(dev, sizeof(*ssr), GFP_KERNEL);
+	if (!ssr)
+		return -ENOMEM;
+
+	ssr->dev = dev;
+	ssr->ept = rpdev->ept;
+	INIT_LIST_HEAD(&ssr->notify_list);
+	init_completion(&ssr->completion);
+
+	glink_ssr_init_notify(ssr);
+	dev_set_drvdata(dev, ssr);
+
+	return 0;
+}
+
+/* rpmsg remove: drop every SSR notifier this endpoint registered */
+static void glink_ssr_remove(struct rpmsg_device *rpdev)
+{
+	struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
+	struct glink_ssr_nb *nb;
+
+	list_for_each_entry(nb, &ssr->notify_list, list)
+		subsys_notif_unregister_notifier(nb->ssr_register_handle,
+						 &nb->nb);
+
+	dev_set_drvdata(&rpdev->dev, NULL);
+}
+
+/* Bind to the remote "glink_ssr" channel on any edge */
+static const struct rpmsg_device_id glink_ssr_match[] = {
+	{ "glink_ssr" },
+	{}
+};
+
+static struct rpmsg_driver glink_ssr_driver = {
+	.probe = glink_ssr_probe,
+	.remove = glink_ssr_remove,
+	.callback = glink_ssr_callback,
+	.id_table = glink_ssr_match,
+	.drv = {
+		.name = "glink_ssr",
+	},
+};
+module_rpmsg_driver(glink_ssr_driver);
+
+/*
+ * SSR notifier for a transport edge: (re)register the glink transport
+ * when the remote powers up, unregister it after shutdown.
+ */
+static int glink_probe_ssr_cb(struct notifier_block *this,
+			      unsigned long code, void *data)
+{
+	struct edge_info *einfo = container_of(this, struct edge_info, nb);
+
+	/* Fix: code is unsigned long; %d was a format mismatch */
+	GLINK_INFO("received %lu for %s", code, einfo->ssr_label);
+
+	switch (code) {
+	case SUBSYS_AFTER_POWERUP:
+		/* Fix: guard against edges that never set the ops */
+		if (einfo->register_fn)
+			einfo->register_fn(einfo);
+		break;
+	case SUBSYS_AFTER_SHUTDOWN:
+		if (einfo->unregister_fn)
+			einfo->unregister_fn(einfo);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/* Register the smem-based glink transport for this edge. */
+static int glink_probe_smem_reg(struct edge_info *einfo)
+{
+	struct device *dev = einfo->dev;
+
+	einfo->glink = qcom_glink_smem_register(dev, einfo->node);
+	if (IS_ERR_OR_NULL(einfo->glink)) {
+		GLINK_ERR(dev, "register failed for %s\n", einfo->ssr_label);
+		einfo->glink = NULL;
+		return 0;
+	}
+	/* Fix: success was logged even when registration failed */
+	GLINK_INFO("register successful for %s\n", einfo->ssr_label);
+
+	return 0;
+}
+
+/* Tear down the smem-based glink transport, if one was registered. */
+static void glink_probe_smem_unreg(struct edge_info *einfo)
+{
+	void *glink = einfo->glink;
+
+	einfo->glink = NULL;
+	if (glink)
+		qcom_glink_smem_unregister(glink);
+
+	GLINK_INFO("unregister for %s\n", einfo->ssr_label);
+}
+
+/* Register the spss-based glink transport for this edge. */
+static int glink_probe_spss_reg(struct edge_info *einfo)
+{
+	struct device *dev = einfo->dev;
+
+	einfo->glink = qcom_glink_spss_register(dev, einfo->node);
+	if (IS_ERR_OR_NULL(einfo->glink)) {
+		GLINK_ERR(dev, "register failed for %s\n", einfo->ssr_label);
+		einfo->glink = NULL;
+		return 0;
+	}
+	/* Fix: success was logged even when registration failed */
+	GLINK_INFO("register successful for %s\n", einfo->ssr_label);
+
+	return 0;
+}
+
+/* Tear down the spss-based glink transport, if one was registered. */
+static void glink_probe_spss_unreg(struct edge_info *einfo)
+{
+	void *glink = einfo->glink;
+
+	einfo->glink = NULL;
+	if (glink)
+		qcom_glink_spss_unregister(glink);
+
+	GLINK_INFO("unregister for %s\n", einfo->ssr_label);
+}
+
+/*
+ * Parse one child of the "qcom,glink" node and bring up its transport:
+ * smem/spss edges are registered lazily via SSR notifications, spi edges
+ * register immediately (their SSR handling is self contained).
+ */
+static void probe_subsystem(struct device *dev, struct device_node *np)
+{
+	struct edge_info *einfo;
+	const char *transport;
+	void *handle;
+	int ret;
+
+	einfo = devm_kzalloc(dev, sizeof(*einfo), GFP_KERNEL);
+	if (!einfo)
+		return;
+
+	ret = of_property_read_string(np, "label", &einfo->ssr_label);
+	if (ret < 0)
+		einfo->ssr_label = np->name;
+
+	ret = of_property_read_string(np, "qcom,glink-label",
+				      &einfo->glink_label);
+	if (ret < 0) {
+		GLINK_ERR(dev, "no qcom,glink-label for %s\n",
+			  einfo->ssr_label);
+		goto free_einfo;
+	}
+
+	einfo->dev = dev;
+	einfo->node = np;
+
+	ret = of_property_read_string(np, "transport", &transport);
+	if (ret < 0) {
+		GLINK_ERR(dev, "%s missing transport\n", einfo->ssr_label);
+		goto free_einfo;
+	}
+
+	if (!strcmp(transport, "smem")) {
+		einfo->register_fn = glink_probe_smem_reg;
+		einfo->unregister_fn = glink_probe_smem_unreg;
+	} else if (!strcmp(transport, "spss")) {
+		einfo->register_fn = glink_probe_spss_reg;
+		einfo->unregister_fn = glink_probe_spss_unreg;
+	} else if (!strcmp(transport, "spi")) {
+		/* SPI SSR is self contained */
+		einfo->glink = qcom_glink_spi_register(dev, np);
+		if (IS_ERR_OR_NULL(einfo->glink)) {
+			GLINK_ERR(dev, "%s failed\n", einfo->ssr_label);
+			goto free_einfo;
+		}
+		list_add_tail(&einfo->list, &edge_infos);
+		return;
+	} else {
+		/*
+		 * Fix: an unknown transport previously fell through and
+		 * registered an SSR notifier with NULL register_fn /
+		 * unregister_fn, crashing later in glink_probe_ssr_cb().
+		 */
+		GLINK_ERR(dev, "%s unknown transport %s\n",
+			  einfo->ssr_label, transport);
+		goto free_einfo;
+	}
+
+	einfo->nb.notifier_call = glink_probe_ssr_cb;
+
+	handle = subsys_notif_register_notifier(einfo->ssr_label, &einfo->nb);
+	if (IS_ERR_OR_NULL(handle)) {
+		GLINK_ERR(dev, "could not register for SSR notifier for %s\n",
+			  einfo->ssr_label);
+		goto free_einfo;
+	}
+
+	list_add_tail(&einfo->list, &edge_infos);
+	GLINK_INFO("probe successful for %s\n", einfo->ssr_label);
+
+	return;
+
+free_einfo:
+	devm_kfree(dev, einfo);
+}
+
+/* Platform probe: spin up one transport per available child node */
+static int glink_probe(struct platform_device *pdev)
+{
+	struct device_node *child;
+
+	for_each_available_child_of_node(pdev->dev.of_node, child)
+		probe_subsystem(&pdev->dev, child);
+
+	return 0;
+}
+
+/* Bind this helper driver to the top-level "qcom,glink" DT node */
+static const struct of_device_id glink_match_table[] = {
+	{ .compatible = "qcom,glink" },
+	{},
+};
+
+static struct platform_driver glink_probe_driver = {
+	.probe = glink_probe,
+	.driver = {
+		.name = "msm_glink",
+		.of_match_table = glink_match_table,
+	},
+};
+
+/*
+ * Early init: create the IPC log context (best effort -- the logging
+ * macros tolerate a NULL context) and register the platform driver.
+ */
+static int __init glink_probe_init(void)
+{
+	int ret;
+
+	glink_ilc = ipc_log_context_create(GLINK_PROBE_LOG_PAGE_CNT,
+					   "glink_probe", 0);
+
+	ret = platform_driver_register(&glink_probe_driver);
+	if (ret) {
+		pr_err("%s: glink_probe register failed %d\n",
+		       __func__, ret);
+		/* Fix: don't leak the IPC log context on failure */
+		if (glink_ilc)
+			ipc_log_context_destroy(glink_ilc);
+		glink_ilc = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. GLINK probe helper driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/msm_bus/Makefile b/drivers/soc/qcom/msm_bus/Makefile
new file mode 100644
index 0000000..d0d43d4
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for msm-bus driver specific files
+#
+obj-y += msm_bus_core.o msm_bus_client_api.o
+obj-$(CONFIG_OF) += msm_bus_of.o
+obj-$(CONFIG_MSM_RPM_SMD) += msm_bus_rpm_smd.o
+
+# RPMh-based and legacy adhoc fabric backends are mutually exclusive
+ifdef CONFIG_QCOM_BUS_CONFIG_RPMH
+	obj-y += msm_bus_fabric_rpmh.o msm_bus_arb_rpmh.o msm_bus_rules.o \
+		msm_bus_bimc_rpmh.o msm_bus_noc_rpmh.o
+	obj-$(CONFIG_OF) += msm_bus_of_rpmh.o
+else
+	obj-y += msm_bus_fabric_adhoc.o msm_bus_arb_adhoc.o msm_bus_rules.o \
+		msm_bus_bimc_adhoc.o msm_bus_noc_adhoc.o
+	obj-$(CONFIG_OF) += msm_bus_of_adhoc.o
+endif
+
+obj-$(CONFIG_DEBUG_FS) += msm_bus_dbg.o
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h b/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h
new file mode 100644
index 0000000..d5792ad
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
+#define _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+
+struct msm_bus_node_device_type;
+/* One client's vote (ib/ab per context) along a path through the fabric */
+struct link_node {
+	uint64_t lnode_ib[NUM_CTX];
+	uint64_t lnode_ab[NUM_CTX];
+	int next;
+	struct device *next_dev;
+	struct list_head link;
+	uint32_t in_use;
+	const char *cl_name;
+};
+
+/* New types introduced for adhoc topology */
+/* Hardware QoS programming hooks, filled in per NoC/BIMC flavor */
+struct msm_bus_noc_ops {
+	int (*qos_init)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq);
+	int (*set_bw)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq);
+	int (*limit_mport)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq, int enable_lim,
+			uint64_t lim_bw);
+	bool (*update_bw_reg)(int mode);
+};
+
+/* Aggregated bandwidth state for a node in one context */
+struct nodebw {
+	uint64_t sum_ab;
+	uint64_t last_sum_ab;
+	uint64_t max_ib;
+	uint64_t cur_clk_hz;
+	uint32_t util_used;
+	uint32_t vrail_used;
+};
+
+/* Fabric-level device data: QoS register window and programming ops */
+struct msm_bus_fab_device_type {
+	void __iomem *qos_base;
+	phys_addr_t pqos_base;
+	size_t qos_range;
+	uint32_t base_offset;
+	uint32_t qos_freq;
+	uint32_t qos_off;
+	struct msm_bus_noc_ops noc_ops;
+	enum msm_bus_hw_sel bus_type;
+	bool bypass_qos_prg;
+};
+
+/* Static QoS parameters for a master port, parsed from DT */
+struct qos_params_type {
+	int mode;
+	unsigned int prio_lvl;
+	unsigned int prio_rd;
+	unsigned int prio_wr;
+	unsigned int prio1;
+	unsigned int prio0;
+	unsigned int reg_prio1;
+	unsigned int reg_prio0;
+	unsigned int gp;
+	unsigned int thmp;
+	unsigned int ws;
+	u64 bw_buffer;
+};
+
+/* Utilization factor to apply above a given bandwidth threshold */
+struct node_util_levels_type {
+	uint64_t threshold;
+	uint32_t util_fact;
+};
+
+/* Bandwidth aggregation parameters for a node */
+struct node_agg_params_type {
+	uint32_t agg_scheme;
+	uint32_t num_aggports;
+	unsigned int buswidth;
+	uint32_t vrail_comp;
+	uint32_t num_util_levels;
+	struct node_util_levels_type *util_levels;
+};
+
+/* Static topology description of one bus node (from DT) */
+struct msm_bus_node_info_type {
+	const char *name;
+	unsigned int id;
+	int mas_rpm_id;
+	int slv_rpm_id;
+	int num_ports;
+	int num_qports;
+	int *qport;
+	struct qos_params_type qos_params;
+	unsigned int num_connections;
+	unsigned int num_blist;
+	bool is_fab_dev;
+	bool virt_dev;
+	bool is_traversed;
+	unsigned int *connections;
+	unsigned int *bl_cons;
+	struct device **dev_connections;
+	struct device **black_connections;
+	unsigned int bus_device_id;
+	struct device *bus_device;
+	struct rule_update_path_info rule;
+	uint64_t lim_bw;
+	bool defer_qos;
+	struct node_agg_params_type agg_params;
+};
+
+/* Runtime state of one bus node; embeds its struct device */
+struct msm_bus_node_device_type {
+	struct msm_bus_node_info_type *node_info;
+	struct msm_bus_fab_device_type *fabdev;
+	int num_lnodes;
+	struct link_node *lnode_list;
+	struct nodebw node_bw[NUM_CTX];
+	struct list_head link;
+	unsigned int ap_owned;
+	struct nodeclk clk[NUM_CTX];
+	struct nodeclk bus_qos_clk;
+	uint32_t num_node_qos_clks;
+	struct nodeclk *node_qos_clks;
+	struct device_node *of_node;
+	struct device dev;
+	bool dirty;
+	struct list_head dev_link;
+	struct list_head devlist;
+};
+
+/* Map an embedded struct device back to its owning bus node */
+static inline struct msm_bus_node_device_type *to_msm_bus_node(struct device *d)
+{
+	return container_of(d, struct msm_bus_node_device_type, dev);
+}
+
+
+int msm_bus_enable_limiter(struct msm_bus_node_device_type *nodedev,
+			   int throttle_en, uint64_t lim_bw);
+int msm_bus_commit_data(struct list_head *clist);
+void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
+			     size_t new_size, gfp_t flags);
+
+extern struct msm_bus_device_node_registration
+	*msm_bus_of_to_pdata(struct platform_device *pdev);
+extern void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops);
+extern int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_of_get_static_rules(struct platform_device *pdev,
+				       struct bus_rule_type **static_rule);
+extern int msm_rules_update_path(struct list_head *input_list,
+				 struct list_head *output_list);
+extern void print_all_rules(void);
+#ifdef CONFIG_DEBUG_BUS_VOTER
+int msm_bus_floor_init(struct device *dev);
+#else
+/* Stub when the debug bus voter is compiled out */
+static inline int msm_bus_floor_init(struct device *dev)
+{
+	return 0;
+}
+#endif /* CONFIG_DEBUG_BUS_VOTER */
+#endif /* _ARCH_ARM_MACH_MSM_BUS_ADHOC_H */
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c
new file mode 100644
index 0000000..4bd5273
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c
@@ -0,0 +1,1419 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rtmutex.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+#include <trace/events/trace_msm_bus.h>
+
+#define NUM_CL_HANDLES 50
+#define NUM_LNODES 3
+#define MAX_STR_CL 50
+
+/* One BFS level: the set of nodes visited at the same search depth. */
+struct bus_search_type {
+ struct list_head link; /* membership in a route_list */
+ struct list_head node_list; /* nodes at this depth */
+};
+
+/* Growable table mapping client handles to clients (slot 0 reserved). */
+struct handle_type {
+ int num_entries;
+ struct msm_bus_client **cl_list;
+};
+
+static struct handle_type handle_list;
+/* Pending rule input/output and dirty-node lists; guarded by the RT mutex. */
+static LIST_HEAD(input_list);
+static LIST_HEAD(apply_list);
+static LIST_HEAD(commit_list);
+
+DEFINE_RT_MUTEX(msm_bus_adhoc_lock);
+
+/* Return true if the node with @id is present on @black_list. */
+static bool chk_bl_list(struct list_head *black_list, unsigned int id)
+{
+ struct msm_bus_node_device_type *bus_node = NULL;
+
+ list_for_each_entry(bus_node, black_list, link) {
+ if (bus_node->node_info->id == id)
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Fold any nodes still pending on @edge_list/@traverse_list into a new
+ * search level appended to @route_list, so prune_path() can later reset
+ * their is_traversed flags and free the level.
+ */
+static void copy_remaining_nodes(struct list_head *edge_list, struct list_head
+ *traverse_list, struct list_head *route_list)
+{
+ struct bus_search_type *search_node;
+
+ if (list_empty(edge_list) && list_empty(traverse_list))
+ return;
+
+ search_node = kzalloc(sizeof(struct bus_search_type), GFP_KERNEL);
+ /* Original code dereferenced the result unchecked; bail on OOM. */
+ if (!search_node)
+ return;
+
+ INIT_LIST_HEAD(&search_node->node_list);
+ list_splice_init(edge_list, traverse_list);
+ list_splice_init(traverse_list, &search_node->node_list);
+ list_add_tail(&search_node->link, route_list);
+}
+
+/*
+ * Duplicate instantiation from msm_bus_arb.c. Todo: there needs to be a
+ * "util" file for these common func/macros.
+ *
+ */
+/*
+ * Divide @num by @base, rounding any non-zero sub-unit result up to 1 so
+ * a small vote never collapses to a zero rate.  Power-of-two divisors are
+ * special-cased as shifts; do_div() handles the rest.
+ */
+uint64_t msm_bus_div64(uint64_t num, unsigned int base)
+{
+ uint64_t *n = &num;
+
+ if ((num > 0) && (num < base))
+ return 1;
+
+ switch (base) {
+ case 0:
+ WARN(1, "AXI: Divide by 0 attempted\n");
+ /* fall through: treat divide-by-zero as divide-by-one */
+ case 1: return num;
+ case 2: return (num >> 1);
+ case 4: return (num >> 2);
+ case 8: return (num >> 3);
+ case 16: return (num >> 4);
+ case 32: return (num >> 5);
+ }
+
+ /* do_div() divides *n in place and returns the remainder. */
+ do_div(*n, base);
+ return *n;
+}
+
+/*
+ * bus_find_device() match callback: true when the bus node wrapping @dev
+ * has the node id pointed to by @id.
+ */
+int msm_bus_device_match_adhoc(struct device *dev, void *id)
+{
+ struct msm_bus_node_device_type *bnode = to_msm_bus_node(dev);
+
+ return bnode ? (bnode->node_info->id == *(unsigned int *)id) : 0;
+}
+
+/*
+ * Allocate (or reuse) a link_node slot on @dev recording one hop of a
+ * client path.  @next_hop is the id of the next device on the path and
+ * @prev_idx the lnode index already created on that device; when
+ * @next_hop equals this node's own id the hop is terminal.  Returns the
+ * slot index on @dev, or -1 on failure.
+ */
+static int gen_lnode(struct device *dev,
+ int next_hop, int prev_idx, const char *cl_name)
+{
+ struct link_node *lnode;
+ struct msm_bus_node_device_type *cur_dev = NULL;
+ int lnode_idx = -1;
+
+ if (!dev)
+ goto exit_gen_lnode;
+
+ cur_dev = to_msm_bus_node(dev);
+ if (!cur_dev) {
+ MSM_BUS_ERR("%s: Null device ptr", __func__);
+ goto exit_gen_lnode;
+ }
+
+ if (!cur_dev->num_lnodes) {
+ /* First client through this node: allocate the initial pool. */
+ cur_dev->lnode_list = devm_kzalloc(dev,
+ sizeof(struct link_node) * NUM_LNODES,
+ GFP_KERNEL);
+ if (!cur_dev->lnode_list)
+ goto exit_gen_lnode;
+
+ lnode = cur_dev->lnode_list;
+ cur_dev->num_lnodes = NUM_LNODES;
+ lnode_idx = 0;
+ } else {
+ int i;
+
+ /* Look for a free slot in the existing pool. */
+ for (i = 0; i < cur_dev->num_lnodes; i++) {
+ if (!cur_dev->lnode_list[i].in_use)
+ break;
+ }
+
+ if (i < cur_dev->num_lnodes) {
+ lnode = &cur_dev->lnode_list[i];
+ lnode_idx = i;
+ } else {
+ /* Pool exhausted: grow by NUM_LNODES entries. */
+ struct link_node *realloc_list;
+ size_t cur_size = sizeof(struct link_node) *
+ cur_dev->num_lnodes;
+
+ cur_dev->num_lnodes += NUM_LNODES;
+ realloc_list = msm_bus_realloc_devmem(
+ dev,
+ cur_dev->lnode_list,
+ cur_size,
+ sizeof(struct link_node) *
+ cur_dev->num_lnodes, GFP_KERNEL);
+
+ if (!realloc_list)
+ goto exit_gen_lnode;
+
+ cur_dev->lnode_list = realloc_list;
+ lnode = &cur_dev->lnode_list[i];
+ lnode_idx = i;
+ }
+ }
+
+ lnode->in_use = 1;
+ lnode->cl_name = cl_name;
+ if (next_hop == cur_dev->node_info->id) {
+ /* Terminal hop: the path ends at this node. */
+ lnode->next = -1;
+ lnode->next_dev = NULL;
+ } else {
+ lnode->next = prev_idx;
+ lnode->next_dev = bus_find_device(&msm_bus_type, NULL,
+ (void *) &next_hop,
+ msm_bus_device_match_adhoc);
+ }
+
+ /* Fresh slot starts with zero ib/ab votes for every context. */
+ memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX);
+ memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX);
+
+exit_gen_lnode:
+ return lnode_idx;
+}
+
+/*
+ * Release the link_node slot @lnode_idx on @cur_dev (a -1 index is a
+ * no-op).  Returns 0 on success or -ENODEV on a bad device/index.
+ */
+static int remove_lnode(struct msm_bus_node_device_type *cur_dev,
+ int lnode_idx)
+{
+ int ret = 0;
+
+ if (!cur_dev) {
+ MSM_BUS_ERR("%s: Null device ptr", __func__);
+ ret = -ENODEV;
+ goto exit_remove_lnode;
+ }
+
+ if (lnode_idx != -1) {
+ if (!cur_dev->num_lnodes ||
+ (lnode_idx > (cur_dev->num_lnodes - 1))) {
+ MSM_BUS_ERR("%s: Invalid Idx %d, num_lnodes %d",
+ __func__, lnode_idx, cur_dev->num_lnodes);
+ ret = -ENODEV;
+ goto exit_remove_lnode;
+ }
+
+ /* Mark the slot free; the pool itself is never shrunk. */
+ cur_dev->lnode_list[lnode_idx].next = -1;
+ cur_dev->lnode_list[lnode_idx].next_dev = NULL;
+ cur_dev->lnode_list[lnode_idx].in_use = 0;
+ cur_dev->lnode_list[lnode_idx].cl_name = NULL;
+ }
+
+exit_remove_lnode:
+ return ret;
+}
+
+/*
+ * Walk @route_list (the per-depth BFS levels) backwards from @dest,
+ * materializing lnode entries for the single discovered path, then free
+ * every search level, clear is_traversed flags and empty @black_list.
+ * Returns the lnode index at the source end of the path, or -1.
+ */
+static int prune_path(struct list_head *route_list, int dest, int src,
+ struct list_head *black_list, int found,
+ const char *cl_name)
+{
+ struct bus_search_type *search_node, *temp_search_node;
+ struct msm_bus_node_device_type *bus_node;
+ struct list_head *bl_list;
+ struct list_head *temp_bl_list;
+ int search_dev_id = dest;
+ struct device *dest_dev = bus_find_device(&msm_bus_type, NULL,
+ (void *) &dest,
+ msm_bus_device_match_adhoc);
+ int lnode_hop = -1;
+
+ if (!found)
+ goto reset_links;
+
+ if (!dest_dev) {
+ MSM_BUS_ERR("%s: Can't find dest dev %d", __func__, dest);
+ goto exit_prune_path;
+ }
+
+ /* Terminal lnode on the destination itself. */
+ lnode_hop = gen_lnode(dest_dev, search_dev_id, lnode_hop, cl_name);
+
+ /* Backtrack: at each shallower level, find the node that connects
+ * to the hop found so far and chain an lnode onto it.
+ */
+ list_for_each_entry_reverse(search_node, route_list, link) {
+ list_for_each_entry(bus_node, &search_node->node_list, link) {
+ unsigned int i;
+
+ for (i = 0; i < bus_node->node_info->num_connections;
+ i++) {
+ if (bus_node->node_info->connections[i] ==
+ search_dev_id) {
+ dest_dev = bus_find_device(
+ &msm_bus_type,
+ NULL,
+ (void *)
+ &bus_node->node_info->id,
+ msm_bus_device_match_adhoc);
+
+ if (!dest_dev) {
+ lnode_hop = -1;
+ goto reset_links;
+ }
+
+ lnode_hop = gen_lnode(dest_dev,
+ search_dev_id,
+ lnode_hop, cl_name);
+ search_dev_id =
+ bus_node->node_info->id;
+ break;
+ }
+ }
+ }
+ }
+reset_links:
+ /* Tear down all search state regardless of success. */
+ list_for_each_entry_safe(search_node, temp_search_node, route_list,
+ link) {
+ list_for_each_entry(bus_node, &search_node->node_list,
+ link)
+ bus_node->node_info->is_traversed = false;
+
+ list_del(&search_node->link);
+ kfree(search_node);
+ }
+
+ list_for_each_safe(bl_list, temp_bl_list, black_list)
+ list_del(bl_list);
+
+exit_prune_path:
+ return lnode_hop;
+}
+
+/* Append @node's black-listed connections to @black_list for this search. */
+static void setup_bl_list(struct msm_bus_node_device_type *node,
+ struct list_head *black_list)
+{
+ unsigned int i;
+
+ for (i = 0; i < node->node_info->num_blist; i++) {
+ struct msm_bus_node_device_type *bdev;
+
+ bdev = to_msm_bus_node(node->node_info->black_connections[i]);
+ list_add_tail(&bdev->link, black_list);
+ }
+}
+
+/*
+ * Breadth-first search from @src_dev to node id @dest, skipping
+ * black-listed connections.  On success the path is recorded as a chain
+ * of lnodes (via prune_path) and the source-side lnode index is
+ * returned; -1 on failure.
+ */
+static int getpath(struct device *src_dev, int dest, const char *cl_name)
+{
+ struct list_head traverse_list;
+ struct list_head edge_list;
+ struct list_head route_list;
+ struct list_head black_list;
+ struct msm_bus_node_device_type *src_node;
+ struct bus_search_type *search_node;
+ int found = 0;
+ int depth_index = 0;
+ int first_hop = -1;
+ int src;
+
+ INIT_LIST_HEAD(&traverse_list);
+ INIT_LIST_HEAD(&edge_list);
+ INIT_LIST_HEAD(&route_list);
+ INIT_LIST_HEAD(&black_list);
+
+ if (!src_dev) {
+ MSM_BUS_ERR("%s: Cannot locate src dev ", __func__);
+ goto exit_getpath;
+ }
+
+ src_node = to_msm_bus_node(src_dev);
+ if (!src_node) {
+ MSM_BUS_ERR("%s:Fatal, Source node not found", __func__);
+ goto exit_getpath;
+ }
+ src = src_node->node_info->id;
+ list_add_tail(&src_node->link, &traverse_list);
+
+ while ((!found && !list_empty(&traverse_list))) {
+ struct msm_bus_node_device_type *bus_node = NULL;
+ /* Locate dest_id in the traverse list */
+ list_for_each_entry(bus_node, &traverse_list, link) {
+ if (bus_node->node_info->id == dest) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ unsigned int i;
+ /* Setup the new edge list */
+ list_for_each_entry(bus_node, &traverse_list, link) {
+ /* Setup list of black-listed nodes */
+ setup_bl_list(bus_node, &black_list);
+
+ for (i = 0; i <
+ bus_node->node_info->num_connections; i++) {
+ bool skip;
+ struct msm_bus_node_device_type
+ *node_conn;
+ node_conn = to_msm_bus_node(
+ bus_node->node_info->dev_connections[i]);
+ if (
+ node_conn->node_info->is_traversed) {
+ MSM_BUS_ERR("Circ Path %d\n",
+ node_conn->node_info->id);
+ goto reset_traversed;
+ }
+ skip = chk_bl_list(&black_list,
+ bus_node->node_info->connections[i]);
+ if (!skip) {
+ list_add_tail(
+ &node_conn->link, &edge_list);
+ node_conn->node_info->is_traversed =
+ true;
+ }
+ }
+ }
+
+ /* Keep tabs of the previous search list */
+ /* NOTE(review): kzalloc result is used unchecked; a
+ * failed allocation here would oops.
+ */
+ search_node = kzalloc(sizeof(struct bus_search_type),
+ GFP_KERNEL);
+ INIT_LIST_HEAD(&search_node->node_list);
+ list_splice_init(&traverse_list,
+ &search_node->node_list);
+ /* Add the previous search list to a route list */
+ list_add_tail(&search_node->link, &route_list);
+ /* Advancing the list depth */
+ depth_index++;
+ list_splice_init(&edge_list, &traverse_list);
+ }
+ }
+reset_traversed:
+ copy_remaining_nodes(&edge_list, &traverse_list, &route_list);
+ first_hop = prune_path(&route_list, dest, src, &black_list, found,
+ cl_name);
+
+exit_getpath:
+ return first_hop;
+}
+
+/*
+ * AGG_SCHEME_1 aggregation: pick the utilization factor from the
+ * threshold table matching the current sum(AB), then compute the clock
+ * request in Hz for @bus_dev in context @ctx.  Falls back to the fabric
+ * device's tables when the node supplies none.
+ */
+static uint64_t scheme1_agg_scheme(struct msm_bus_node_device_type *bus_dev,
+ struct msm_bus_node_device_type *fab_dev, int ctx)
+{
+ uint64_t max_ib;
+ uint64_t sum_ab;
+ uint64_t bw_max_hz;
+ uint32_t util_fact = 0;
+ uint32_t vrail_comp = 0;
+ struct node_util_levels_type *utils;
+ int i;
+ int num_util_levels;
+
+ /*
+ * Account for Util factor and vrail comp.
+ * Util factor is picked according to the current sum(AB) for this
+ * node and for this context.
+ * Vrail comp is fixed for the entire performance range.
+ * They default to 100 if absent.
+ *
+ * The aggregated clock is computed as:
+ * Freq_hz = max((sum(ab) * util_fact)/num_chan, max(ib)/vrail_comp)
+ * / bus-width
+ */
+ if (bus_dev->node_info->agg_params.num_util_levels) {
+ utils = bus_dev->node_info->agg_params.util_levels;
+ num_util_levels =
+ bus_dev->node_info->agg_params.num_util_levels;
+ } else {
+ utils = fab_dev->node_info->agg_params.util_levels;
+ num_util_levels =
+ fab_dev->node_info->agg_params.num_util_levels;
+ }
+
+ sum_ab = bus_dev->node_bw[ctx].sum_ab;
+ max_ib = bus_dev->node_bw[ctx].max_ib;
+
+ /* First level whose threshold exceeds sum_ab wins; else use last. */
+ for (i = 0; i < num_util_levels; i++) {
+ if (sum_ab < utils[i].threshold) {
+ util_fact = utils[i].util_fact;
+ break;
+ }
+ }
+ if (i == num_util_levels)
+ util_fact = utils[(num_util_levels - 1)].util_fact;
+
+ vrail_comp = bus_dev->node_info->agg_params.vrail_comp ?
+ bus_dev->node_info->agg_params.vrail_comp :
+ fab_dev->node_info->agg_params.vrail_comp;
+
+ bus_dev->node_bw[ctx].vrail_used = vrail_comp;
+ bus_dev->node_bw[ctx].util_used = util_fact;
+
+ /* Both factors are percentages; 100 means no scaling. */
+ if (util_fact && (util_fact != 100)) {
+ sum_ab *= util_fact;
+ sum_ab = msm_bus_div64(sum_ab, 100);
+ }
+
+ if (vrail_comp && (vrail_comp != 100)) {
+ max_ib *= 100;
+ max_ib = msm_bus_div64(max_ib, vrail_comp);
+ }
+
+ /* Account for multiple channels if any */
+ if (bus_dev->node_info->agg_params.num_aggports > 1)
+ sum_ab = msm_bus_div64(sum_ab,
+ bus_dev->node_info->agg_params.num_aggports);
+
+ if (!bus_dev->node_info->agg_params.buswidth) {
+ MSM_BUS_WARN("No bus width found for %d. Using default\n",
+ bus_dev->node_info->id);
+ bus_dev->node_info->agg_params.buswidth = 8;
+ }
+
+ bw_max_hz = max(max_ib, sum_ab);
+ bw_max_hz = msm_bus_div64(bw_max_hz,
+ bus_dev->node_info->agg_params.buswidth);
+
+ return bw_max_hz;
+}
+
+/*
+ * AGG_SCHEME_LEG aggregation: single fixed utilization factor and vrail
+ * compensation (node value overrides fabric value); otherwise the same
+ * Hz computation as scheme1_agg_scheme().
+ */
+static uint64_t legacy_agg_scheme(struct msm_bus_node_device_type *bus_dev,
+ struct msm_bus_node_device_type *fab_dev, int ctx)
+{
+ uint64_t max_ib;
+ uint64_t sum_ab;
+ uint64_t bw_max_hz;
+ uint32_t util_fact = 0;
+ uint32_t vrail_comp = 0;
+
+ /*
+ * Util_fact and vrail comp are obtained from fabric/Node's dts
+ * properties and are fixed for the entire performance range.
+ * They default to 100 if absent.
+ *
+ * The clock frequency is computed as:
+ * Freq_hz = max((sum(ab) * util_fact)/num_chan, max(ib)/vrail_comp)
+ * / bus-width
+ */
+ util_fact = fab_dev->node_info->agg_params.util_levels[0].util_fact;
+ vrail_comp = fab_dev->node_info->agg_params.vrail_comp;
+
+ if (bus_dev->node_info->agg_params.num_util_levels)
+ util_fact =
+ bus_dev->node_info->agg_params.util_levels[0].util_fact ?
+ bus_dev->node_info->agg_params.util_levels[0].util_fact :
+ util_fact;
+
+ vrail_comp = bus_dev->node_info->agg_params.vrail_comp ?
+ bus_dev->node_info->agg_params.vrail_comp :
+ vrail_comp;
+
+ bus_dev->node_bw[ctx].vrail_used = vrail_comp;
+ bus_dev->node_bw[ctx].util_used = util_fact;
+ sum_ab = bus_dev->node_bw[ctx].sum_ab;
+ max_ib = bus_dev->node_bw[ctx].max_ib;
+
+ /* Both factors are percentages; 100 means no scaling. */
+ if (util_fact && (util_fact != 100)) {
+ sum_ab *= util_fact;
+ sum_ab = msm_bus_div64(sum_ab, 100);
+ }
+
+ if (vrail_comp && (vrail_comp != 100)) {
+ max_ib *= 100;
+ max_ib = msm_bus_div64(max_ib, vrail_comp);
+ }
+
+ /* Account for multiple channels if any */
+ if (bus_dev->node_info->agg_params.num_aggports > 1)
+ sum_ab = msm_bus_div64(sum_ab,
+ bus_dev->node_info->agg_params.num_aggports);
+
+ if (!bus_dev->node_info->agg_params.buswidth) {
+ MSM_BUS_WARN("No bus width found for %d. Using default\n",
+ bus_dev->node_info->id);
+ bus_dev->node_info->agg_params.buswidth = 8;
+ }
+
+ bw_max_hz = max(max_ib, sum_ab);
+ bw_max_hz = msm_bus_div64(bw_max_hz,
+ bus_dev->node_info->agg_params.buswidth);
+
+ return bw_max_hz;
+}
+
+/*
+ * Aggregate all client lnode votes on @bus_dev for context @ctx into
+ * sum(AB)/max(IB), then convert to a clock request via the node's (or
+ * its fabric's) aggregation scheme.  Returns 0 on bad input or when the
+ * scheme is unknown.
+ */
+static uint64_t aggregate_bus_req(struct msm_bus_node_device_type *bus_dev,
+ int ctx)
+{
+ uint64_t bw_hz = 0;
+ int i;
+ struct msm_bus_node_device_type *fab_dev = NULL;
+ uint32_t agg_scheme;
+ uint64_t max_ib = 0;
+ uint64_t sum_ab = 0;
+
+ if (!bus_dev || !to_msm_bus_node(bus_dev->node_info->bus_device)) {
+ MSM_BUS_ERR("Bus node pointer is Invalid");
+ goto exit_agg_bus_req;
+ }
+
+ fab_dev = to_msm_bus_node(bus_dev->node_info->bus_device);
+ for (i = 0; i < bus_dev->num_lnodes; i++) {
+ max_ib = max(max_ib, bus_dev->lnode_list[i].lnode_ib[ctx]);
+ sum_ab += bus_dev->lnode_list[i].lnode_ab[ctx];
+ }
+
+ bus_dev->node_bw[ctx].sum_ab = sum_ab;
+ bus_dev->node_bw[ctx].max_ib = max_ib;
+
+ /* Node-level scheme overrides the fabric default. */
+ if (bus_dev->node_info->agg_params.agg_scheme != AGG_SCHEME_NONE)
+ agg_scheme = bus_dev->node_info->agg_params.agg_scheme;
+ else
+ agg_scheme = fab_dev->node_info->agg_params.agg_scheme;
+
+ switch (agg_scheme) {
+ case AGG_SCHEME_1:
+ bw_hz = scheme1_agg_scheme(bus_dev, fab_dev, ctx);
+ break;
+ case AGG_SCHEME_LEG:
+ bw_hz = legacy_agg_scheme(bus_dev, fab_dev, ctx);
+ break;
+ }
+
+exit_agg_bus_req:
+ return bw_hz;
+}
+
+
+/*
+ * Unlink all rule-input entries from @list.  The entries are embedded in
+ * node_info and are not freed; clearing ->added lets them be re-queued.
+ */
+static void del_inp_list(struct list_head *list)
+{
+ struct rule_update_path_info *rule_node;
+ struct rule_update_path_info *rule_node_tmp;
+
+ list_for_each_entry_safe(rule_node, rule_node_tmp, list, link) {
+ list_del(&rule_node->link);
+ rule_node->added = false;
+ }
+}
+
+/* Unlink all apply-rule entries from @list (entries are not freed here). */
+static void del_op_list(struct list_head *list)
+{
+ struct rule_apply_rcm_info *rule;
+ struct rule_apply_rcm_info *rule_tmp;
+
+ list_for_each_entry_safe(rule, rule_tmp, list, link)
+ list_del(&rule->link);
+}
+
+/*
+ * Apply every rule on @list whose after_clk_commit flag matches
+ * @after_clk_commit by programming the target node's limiter.
+ * NOTE(review): only the last rule's status is returned; earlier
+ * failures are logged but overwritten.
+ */
+static int msm_bus_apply_rules(struct list_head *list, bool after_clk_commit)
+{
+ struct rule_apply_rcm_info *rule;
+ struct device *dev = NULL;
+ struct msm_bus_node_device_type *dev_info = NULL;
+ int ret = 0;
+
+ list_for_each_entry(rule, list, link) {
+ if (rule->after_clk_commit != after_clk_commit)
+ continue;
+
+ dev = bus_find_device(&msm_bus_type, NULL,
+ (void *) &rule->id,
+ msm_bus_device_match_adhoc);
+
+ if (!dev) {
+ MSM_BUS_ERR("Can't find dev node for %d", rule->id);
+ continue;
+ }
+ dev_info = to_msm_bus_node(dev);
+
+ ret = msm_bus_enable_limiter(dev_info, rule->throttle,
+ rule->lim_bw);
+ if (ret)
+ MSM_BUS_ERR("Failed to set limiter for %d", rule->id);
+ }
+
+ return ret;
+}
+
+/*
+ * Flush the pending vote state: run pre-commit rules, commit all dirty
+ * nodes on commit_list to hardware, run post-commit rules, then reset
+ * the three work lists.  Caller must hold msm_bus_adhoc_lock.
+ */
+static void commit_data(void)
+{
+ bool rules_registered = msm_rule_are_rules_registered();
+
+ if (rules_registered) {
+ msm_rules_update_path(&input_list, &apply_list);
+ msm_bus_apply_rules(&apply_list, false);
+ }
+
+ msm_bus_commit_data(&commit_list);
+
+ if (rules_registered) {
+ msm_bus_apply_rules(&apply_list, true);
+ del_inp_list(&input_list);
+ del_op_list(&apply_list);
+ }
+ INIT_LIST_HEAD(&input_list);
+ INIT_LIST_HEAD(&apply_list);
+ INIT_LIST_HEAD(&commit_list);
+}
+
+/*
+ * Queue @node and its parent bus device on commit_list exactly once; the
+ * dirty flag prevents duplicates and is cleared by the commit path.
+ */
+static void add_node_to_clist(struct msm_bus_node_device_type *node)
+{
+ struct msm_bus_node_device_type *node_parent =
+ to_msm_bus_node(node->node_info->bus_device);
+
+ if (!node->dirty) {
+ list_add_tail(&node->link, &commit_list);
+ node->dirty = true;
+ }
+
+ if (!node_parent->dirty) {
+ list_add_tail(&node_parent->link, &commit_list);
+ node_parent->dirty = true;
+ }
+}
+
+/*
+ * Walk the lnode chain starting at @src_dev/@src_idx toward @dest,
+ * storing the new active/sleep ib+ab votes in each hop's lnode,
+ * re-aggregating every touched node for both contexts and queueing it
+ * for commit.  Also feeds per-node totals into the rules engine when
+ * rules are registered.  Returns 0 or a negative errno.
+ */
+static int update_path(struct device *src_dev, int dest, uint64_t act_req_ib,
+ uint64_t act_req_bw, uint64_t slp_req_ib,
+ uint64_t slp_req_bw, uint64_t cur_ib, uint64_t cur_bw,
+ int src_idx, int ctx)
+{
+ struct device *next_dev = NULL;
+ struct link_node *lnode = NULL;
+ struct msm_bus_node_device_type *dev_info = NULL;
+ int curr_idx;
+ int ret = 0;
+ struct rule_update_path_info *rule_node;
+ bool rules_registered = msm_rule_are_rules_registered();
+
+ if (IS_ERR_OR_NULL(src_dev)) {
+ MSM_BUS_ERR("%s: No source device", __func__);
+ ret = -ENODEV;
+ goto exit_update_path;
+ }
+
+ next_dev = src_dev;
+
+ if (src_idx < 0) {
+ MSM_BUS_ERR("%s: Invalid lnode idx %d", __func__, src_idx);
+ ret = -ENXIO;
+ goto exit_update_path;
+ }
+ curr_idx = src_idx;
+
+ while (next_dev) {
+ int i;
+
+ dev_info = to_msm_bus_node(next_dev);
+
+ if (curr_idx >= dev_info->num_lnodes) {
+ MSM_BUS_ERR("%s: Invalid lnode Idx %d num lnodes %d",
+ __func__, curr_idx, dev_info->num_lnodes);
+ ret = -ENXIO;
+ goto exit_update_path;
+ }
+
+ /* NOTE(review): &array[i] can never be NULL; this check is
+ * effectively dead but harmless.
+ */
+ lnode = &dev_info->lnode_list[curr_idx];
+ if (!lnode) {
+ MSM_BUS_ERR("%s: Invalid lnode ptr lnode %d",
+ __func__, curr_idx);
+ ret = -ENXIO;
+ goto exit_update_path;
+ }
+ lnode->lnode_ib[ACTIVE_CTX] = act_req_ib;
+ lnode->lnode_ab[ACTIVE_CTX] = act_req_bw;
+ lnode->lnode_ib[DUAL_CTX] = slp_req_ib;
+ lnode->lnode_ab[DUAL_CTX] = slp_req_bw;
+
+ for (i = 0; i < NUM_CTX; i++)
+ dev_info->node_bw[i].cur_clk_hz =
+ aggregate_bus_req(dev_info, i);
+
+ add_node_to_clist(dev_info);
+
+ if (rules_registered) {
+ rule_node = &dev_info->node_info->rule;
+ rule_node->id = dev_info->node_info->id;
+ rule_node->ib = dev_info->node_bw[ACTIVE_CTX].max_ib;
+ rule_node->ab = dev_info->node_bw[ACTIVE_CTX].sum_ab;
+ rule_node->clk =
+ dev_info->node_bw[ACTIVE_CTX].cur_clk_hz;
+ if (!rule_node->added) {
+ list_add_tail(&rule_node->link, &input_list);
+ rule_node->added = true;
+ }
+ }
+
+ /* Advance to the next hop recorded by gen_lnode(). */
+ next_dev = lnode->next_dev;
+ curr_idx = lnode->next;
+ }
+
+exit_update_path:
+ return ret;
+}
+
+/*
+ * Zero out a client's votes along the path from @src_dev (lnode index
+ * @src_idx) to @dst, then free every lnode slot on that path.
+ */
+static int remove_path(struct device *src_dev, int dst, uint64_t cur_ib,
+ uint64_t cur_ab, int src_idx, int active_only)
+{
+ struct device *next_dev = NULL;
+ struct link_node *lnode = NULL;
+ struct msm_bus_node_device_type *dev_info = NULL;
+ int ret = 0;
+ int cur_idx = src_idx;
+ int next_idx;
+
+ /* Update the current path to zero out all request from
+ * this client on all paths
+ */
+ if (!src_dev) {
+ MSM_BUS_ERR("%s: Can't find source device", __func__);
+ ret = -ENODEV;
+ goto exit_remove_path;
+ }
+
+ ret = update_path(src_dev, dst, 0, 0, 0, 0, cur_ib, cur_ab, src_idx,
+ active_only);
+ if (ret) {
+ MSM_BUS_ERR("%s: Error zeroing out path ctx %d",
+ __func__, ACTIVE_CTX);
+ goto exit_remove_path;
+ }
+
+ next_dev = src_dev;
+
+ /* Capture next hop before remove_lnode() clears the slot. */
+ while (next_dev) {
+ dev_info = to_msm_bus_node(next_dev);
+ lnode = &dev_info->lnode_list[cur_idx];
+ next_idx = lnode->next;
+ next_dev = lnode->next_dev;
+ remove_lnode(dev_info, cur_idx);
+ cur_idx = next_idx;
+ }
+
+exit_remove_path:
+ return ret;
+}
+
+/*
+ * Debug helper: log every hop of the path that starts at node id @src,
+ * lnode index @curr, including each node's aggregated votes and its bus
+ * device's clock rate for the @active_only context.
+ */
+static void getpath_debug(int src, int curr, int active_only)
+{
+ struct device *dev_node;
+ struct device *dev_it;
+ unsigned int hop = 1;
+ int idx;
+ struct msm_bus_node_device_type *devinfo;
+ int i;
+
+ dev_node = bus_find_device(&msm_bus_type, NULL,
+ (void *) &src,
+ msm_bus_device_match_adhoc);
+
+ if (!dev_node) {
+ MSM_BUS_ERR("SRC NOT FOUND %d", src);
+ return;
+ }
+
+ idx = curr;
+ devinfo = to_msm_bus_node(dev_node);
+ dev_it = dev_node;
+
+ MSM_BUS_ERR("Route list Src %d", src);
+ while (dev_it) {
+ struct msm_bus_node_device_type *busdev =
+ to_msm_bus_node(devinfo->node_info->bus_device);
+
+ MSM_BUS_ERR("Hop[%d] at Device %d ctx %d", hop,
+ devinfo->node_info->id, active_only);
+
+ for (i = 0; i < NUM_CTX; i++) {
+ MSM_BUS_ERR("dev info sel ib %llu",
+ devinfo->node_bw[i].cur_clk_hz);
+ MSM_BUS_ERR("dev info sel ab %llu",
+ devinfo->node_bw[i].sum_ab);
+ }
+
+ /* Read next hop before possibly switching devinfo. */
+ dev_it = devinfo->lnode_list[idx].next_dev;
+ idx = devinfo->lnode_list[idx].next;
+ if (dev_it)
+ devinfo = to_msm_bus_node(dev_it);
+
+ MSM_BUS_ERR("Bus Device %d", busdev->node_info->id);
+ MSM_BUS_ERR("Bus Clock %llu", busdev->clk[active_only].rate);
+
+ if (idx < 0)
+ break;
+ hop++;
+ }
+}
+
+/*
+ * Legacy-API unregister: tear down every path of handle @cl at its
+ * current use-case votes, commit the zeroed state, free the client and
+ * release the handle slot.
+ * NOTE(review): @cl is not range-checked against handle_list before
+ * indexing — an out-of-range handle from a caller would overrun cl_list.
+ */
+static void unregister_client_adhoc(uint32_t cl)
+{
+ int i;
+ struct msm_bus_scale_pdata *pdata;
+ int lnode, src, curr, dest;
+ uint64_t cur_clk, cur_bw;
+ struct msm_bus_client *client;
+ struct device *src_dev;
+
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+ if (!cl) {
+ MSM_BUS_ERR("%s: Null cl handle passed unregister\n",
+ __func__);
+ goto exit_unregister_client;
+ }
+ client = handle_list.cl_list[cl];
+ pdata = client->pdata;
+ if (!pdata) {
+ MSM_BUS_ERR("%s: Null pdata passed to unregister\n",
+ __func__);
+ goto exit_unregister_client;
+ }
+
+ curr = client->curr;
+ if ((curr < 0) || (curr >= pdata->num_usecases)) {
+ MSM_BUS_ERR("Invalid index Defaulting curr to 0");
+ curr = 0;
+ }
+
+ MSM_BUS_DBG("%s: Unregistering client %p", __func__, client);
+
+ for (i = 0; i < pdata->usecase->num_paths; i++) {
+ src = client->pdata->usecase[curr].vectors[i].src;
+ dest = client->pdata->usecase[curr].vectors[i].dst;
+
+ lnode = client->src_pnode[i];
+ src_dev = client->src_devs[i];
+ cur_clk = client->pdata->usecase[curr].vectors[i].ib;
+ cur_bw = client->pdata->usecase[curr].vectors[i].ab;
+ remove_path(src_dev, dest, cur_clk, cur_bw, lnode,
+ pdata->active_only);
+ }
+ commit_data();
+ msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl);
+ kfree(client->src_pnode);
+ kfree(client->src_devs);
+ kfree(client);
+ handle_list.cl_list[cl] = NULL;
+exit_unregister_client:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+}
+
+/*
+ * Grow the client handle table by NUM_CL_HANDLES slots (the @size
+ * argument is unused).  New slots are zeroed.  Returns 0 or -ENOMEM; on
+ * krealloc failure the old table remains valid.
+ */
+static int alloc_handle_lst(int size)
+{
+ int ret = 0;
+ struct msm_bus_client **t_cl_list;
+
+ if (!handle_list.num_entries) {
+ t_cl_list = kzalloc(sizeof(struct msm_bus_client *)
+ * NUM_CL_HANDLES, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(t_cl_list)) {
+ ret = -ENOMEM;
+ MSM_BUS_ERR("%s: Failed to allocate handles list",
+ __func__);
+ goto exit_alloc_handle_lst;
+ }
+ handle_list.cl_list = t_cl_list;
+ handle_list.num_entries += NUM_CL_HANDLES;
+ } else {
+ t_cl_list = krealloc(handle_list.cl_list,
+ sizeof(struct msm_bus_client *) *
+ (handle_list.num_entries + NUM_CL_HANDLES),
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(t_cl_list)) {
+ ret = -ENOMEM;
+ MSM_BUS_ERR("%s: Failed to allocate handles list",
+ __func__);
+ goto exit_alloc_handle_lst;
+ }
+
+ handle_list.cl_list = t_cl_list;
+ /* krealloc does not zero the extension; do it here. */
+ memset(&handle_list.cl_list[handle_list.num_entries], 0,
+ NUM_CL_HANDLES * sizeof(struct msm_bus_client *));
+ handle_list.num_entries += NUM_CL_HANDLES;
+ }
+exit_alloc_handle_lst:
+ return ret;
+}
+
+/*
+ * Assign a free non-zero handle slot to @client, growing the table when
+ * full.  Returns the handle, or 0 on allocation failure.
+ * NOTE(review): after a grow, handle = i + 1 skips slot i, wasting one
+ * slot per expansion (required so the very first call yields handle 1).
+ */
+static uint32_t gen_handle(struct msm_bus_client *client)
+{
+ uint32_t handle = 0;
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < handle_list.num_entries; i++) {
+ if (i && !handle_list.cl_list[i]) {
+ handle = i;
+ break;
+ }
+ }
+
+ if (!handle) {
+ ret = alloc_handle_lst(NUM_CL_HANDLES);
+
+ if (ret) {
+ MSM_BUS_ERR("%s: Failed to allocate handle list",
+ __func__);
+ goto exit_gen_handle;
+ }
+ handle = i + 1;
+ }
+ handle_list.cl_list[handle] = client;
+exit_gen_handle:
+ return handle;
+}
+
+/*
+ * Legacy-API register: resolve the source device and path lnodes for
+ * every vector in @pdata's first use-case and hand back a handle (0 on
+ * failure).  No votes are applied until the first update request.
+ * NOTE(review): if path i fails, lnodes created for paths 0..i-1 are not
+ * released on the error path.
+ */
+static uint32_t register_client_adhoc(struct msm_bus_scale_pdata *pdata)
+{
+ int src, dest;
+ int i;
+ struct msm_bus_client *client = NULL;
+ int *lnode;
+ struct device *dev;
+ uint32_t handle = 0;
+
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+ client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL);
+ if (!client) {
+ MSM_BUS_ERR("%s: Error allocating client data", __func__);
+ goto exit_register_client;
+ }
+ client->pdata = pdata;
+
+ lnode = kcalloc(pdata->usecase->num_paths, sizeof(int), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(lnode)) {
+ MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__);
+ goto exit_lnode_malloc_fail;
+ }
+ client->src_pnode = lnode;
+
+ client->src_devs = kcalloc(pdata->usecase->num_paths,
+ sizeof(struct device *), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(client->src_devs)) {
+ MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__);
+ goto exit_src_dev_malloc_fail;
+ }
+ /* -1 means "no use-case applied yet". */
+ client->curr = -1;
+
+ for (i = 0; i < pdata->usecase->num_paths; i++) {
+ src = pdata->usecase->vectors[i].src;
+ dest = pdata->usecase->vectors[i].dst;
+
+ if ((src < 0) || (dest < 0)) {
+ MSM_BUS_ERR("%s:Invalid src/dst.src %d dest %d",
+ __func__, src, dest);
+ goto exit_invalid_data;
+ }
+ dev = bus_find_device(&msm_bus_type, NULL,
+ (void *) &src,
+ msm_bus_device_match_adhoc);
+ if (IS_ERR_OR_NULL(dev)) {
+ MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+ __func__, src, dest);
+ goto exit_invalid_data;
+ }
+ client->src_devs[i] = dev;
+
+ lnode[i] = getpath(dev, dest, client->pdata->name);
+ if (lnode[i] < 0) {
+ MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+ __func__, src, dest);
+ goto exit_invalid_data;
+ }
+ }
+
+ handle = gen_handle(client);
+ msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER,
+ handle);
+ MSM_BUS_DBG("%s:Client handle %d %s", __func__, handle,
+ client->pdata->name);
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+ return handle;
+exit_invalid_data:
+ kfree(client->src_devs);
+exit_src_dev_malloc_fail:
+ kfree(lnode);
+exit_lnode_malloc_fail:
+ kfree(client);
+exit_register_client:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+ return handle;
+}
+
+/*
+ * Switch @client to use-case @idx: for each path, replace the previous
+ * use-case's votes with the new ones (sleep votes mirror active unless
+ * the client is active-only) and commit.  Caller holds the adhoc lock.
+ */
+static int update_client_paths(struct msm_bus_client *client, bool log_trns,
+ unsigned int idx)
+{
+ int lnode, src, dest, cur_idx;
+ uint64_t req_clk, req_bw, curr_clk, curr_bw, slp_clk, slp_bw;
+ int i, ret = 0;
+ struct msm_bus_scale_pdata *pdata;
+ struct device *src_dev;
+
+ if (!client) {
+ MSM_BUS_ERR("Client handle Null");
+ ret = -ENXIO;
+ goto exit_update_client_paths;
+ }
+
+ pdata = client->pdata;
+ if (!pdata) {
+ MSM_BUS_ERR("Client pdata Null");
+ ret = -ENXIO;
+ goto exit_update_client_paths;
+ }
+
+ cur_idx = client->curr;
+ client->curr = idx;
+ for (i = 0; i < pdata->usecase->num_paths; i++) {
+ src = pdata->usecase[idx].vectors[i].src;
+ dest = pdata->usecase[idx].vectors[i].dst;
+
+ lnode = client->src_pnode[i];
+ src_dev = client->src_devs[i];
+ req_clk = client->pdata->usecase[idx].vectors[i].ib;
+ req_bw = client->pdata->usecase[idx].vectors[i].ab;
+ /* cur_idx < 0 means first vote: nothing to retire. */
+ if (cur_idx < 0) {
+ curr_clk = 0;
+ curr_bw = 0;
+ } else {
+ curr_clk =
+ client->pdata->usecase[cur_idx].vectors[i].ib;
+ curr_bw = client->pdata->usecase[cur_idx].vectors[i].ab;
+ MSM_BUS_DBG("%s:ab: %llu ib: %llu\n", __func__,
+ curr_bw, curr_clk);
+ }
+
+ if (pdata->active_only) {
+ slp_clk = 0;
+ slp_bw = 0;
+ } else {
+ slp_clk = req_clk;
+ slp_bw = req_bw;
+ }
+
+ ret = update_path(src_dev, dest, req_clk, req_bw, slp_clk,
+ slp_bw, curr_clk, curr_bw, lnode, pdata->active_only);
+
+ if (ret) {
+ MSM_BUS_ERR("%s: Update path failed! %d ctx %d\n",
+ __func__, ret, pdata->active_only);
+ goto exit_update_client_paths;
+ }
+
+ if (log_trns)
+ getpath_debug(src, lnode, pdata->active_only);
+ }
+ commit_data();
+exit_update_client_paths:
+ return ret;
+}
+
+/*
+ * Toggle a legacy client between active-only and dual context and
+ * re-apply use-case @ctx_idx under the new setting.  Rejects a call
+ * that would not change the context.  Returns 0 or -ENXIO.
+ */
+static int update_context(uint32_t cl, bool active_only,
+ unsigned int ctx_idx)
+{
+ int ret = 0;
+ struct msm_bus_scale_pdata *pdata;
+ struct msm_bus_client *client;
+
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+ if (!cl) {
+ MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+ ret = -ENXIO;
+ goto exit_update_context;
+ }
+
+ client = handle_list.cl_list[cl];
+ if (!client) {
+ ret = -ENXIO;
+ goto exit_update_context;
+ }
+
+ pdata = client->pdata;
+ if (!pdata) {
+ ret = -ENXIO;
+ goto exit_update_context;
+ }
+ if (pdata->active_only == active_only) {
+ MSM_BUS_ERR("No change in context(%d==%d), skip\n",
+ pdata->active_only, active_only);
+ ret = -ENXIO;
+ goto exit_update_context;
+ }
+
+ if (ctx_idx >= pdata->num_usecases) {
+ MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+ cl, ctx_idx);
+ ret = -ENXIO;
+ goto exit_update_context;
+ }
+
+ pdata->active_only = active_only;
+
+ msm_bus_dbg_client_data(client->pdata, ctx_idx, cl);
+ ret = update_client_paths(client, false, ctx_idx);
+ if (ret) {
+ pr_err("%s: Err updating path\n", __func__);
+ goto exit_update_context;
+ }
+
+ trace_bus_update_request_end(pdata->name);
+
+exit_update_context:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+ return ret;
+}
+
+/*
+ * Legacy-API vote update: validate handle @cl and use-case @index, then
+ * apply the new use-case via update_client_paths().  A repeated index is
+ * a silent no-op.  Returns 0 or -ENXIO.
+ */
+static int update_request_adhoc(uint32_t cl, unsigned int index)
+{
+ int ret = 0;
+ struct msm_bus_scale_pdata *pdata;
+ struct msm_bus_client *client;
+ const char *test_cl = "Null";
+ bool log_transaction = false;
+
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+
+ if (!cl) {
+ MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+ ret = -ENXIO;
+ goto exit_update_request;
+ }
+
+ client = handle_list.cl_list[cl];
+ if (!client) {
+ MSM_BUS_ERR("%s: Invalid client pointer ", __func__);
+ ret = -ENXIO;
+ goto exit_update_request;
+ }
+
+ pdata = client->pdata;
+ if (!pdata) {
+ MSM_BUS_ERR("%s: Client data Null.[client didn't register]",
+ __func__);
+ ret = -ENXIO;
+ goto exit_update_request;
+ }
+
+ if (index >= pdata->num_usecases) {
+ MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+ cl, index);
+ ret = -ENXIO;
+ goto exit_update_request;
+ }
+
+ if (client->curr == index) {
+ MSM_BUS_DBG("%s: Not updating client request idx %d unchanged",
+ __func__, index);
+ goto exit_update_request;
+ }
+
+ /* Debug hook: verbose path logging for a client named "Null". */
+ if (!strcmp(test_cl, pdata->name))
+ log_transaction = true;
+
+ MSM_BUS_DBG("%s: cl: %u index: %d curr: %d num_paths: %d\n", __func__,
+ cl, index, client->curr, client->pdata->usecase->num_paths);
+ msm_bus_dbg_client_data(client->pdata, index, cl);
+ ret = update_client_paths(client, log_transaction, index);
+ if (ret) {
+ pr_err("%s: Err updating path\n", __func__);
+ goto exit_update_request;
+ }
+
+ trace_bus_update_request_end(pdata->name);
+
+exit_update_request:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+ return ret;
+}
+
+/*
+ * Free a client handle and its owned name string.  Safe to call with
+ * NULL.  (The original also nulled its local copy of the pointer, which
+ * had no effect outside this function.)
+ */
+static void free_cl_mem(struct msm_bus_client_handle *cl)
+{
+ if (!cl)
+ return;
+
+ kfree(cl->name);
+ kfree(cl);
+}
+
+/*
+ * Handle-API vote update: apply new active ib/ab (mirrored to the sleep
+ * context unless the client is active-only) along the client's cached
+ * path and commit.  An unchanged vote is a no-op.  Returns 0 or errno.
+ */
+static int update_bw_adhoc(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+ int ret = 0;
+ char *test_cl = "test-client";
+ bool log_transaction = false;
+ u64 slp_ib, slp_ab;
+
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+
+ if (!cl) {
+ MSM_BUS_ERR("%s: Invalid client handle %p", __func__, cl);
+ ret = -ENXIO;
+ goto exit_update_request;
+ }
+
+ /* Debug hook: verbose logging for the client named "test-client". */
+ if (!strcmp(test_cl, cl->name))
+ log_transaction = true;
+
+ msm_bus_dbg_rec_transaction(cl, ab, ib);
+
+ if ((cl->cur_act_ib == ib) && (cl->cur_act_ab == ab)) {
+ MSM_BUS_DBG("%s:no change in request", cl->name);
+ goto exit_update_request;
+ }
+
+ if (cl->active_only) {
+ slp_ib = 0;
+ slp_ab = 0;
+ } else {
+ slp_ib = ib;
+ slp_ab = ab;
+ }
+
+ ret = update_path(cl->mas_dev, cl->slv, ib, ab, slp_ib, slp_ab,
+ cl->cur_act_ib, cl->cur_act_ab, cl->first_hop, cl->active_only);
+
+ if (ret) {
+ MSM_BUS_ERR("%s: Update path failed! %d active_only %d\n",
+ __func__, ret, cl->active_only);
+ goto exit_update_request;
+ }
+
+ commit_data();
+ cl->cur_act_ib = ib;
+ cl->cur_act_ab = ab;
+ cl->cur_dual_ib = slp_ib;
+ cl->cur_dual_ab = slp_ab;
+
+ if (log_transaction)
+ getpath_debug(cl->mas, cl->first_hop, cl->active_only);
+ trace_bus_update_request_end(cl->name);
+exit_update_request:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+
+ return ret;
+}
+
+/*
+ * Handle-API vote update with independent active and sleep (dual)
+ * context values.  Zero sleep votes flip the client to active-only.
+ * Returns 0 on success or -ENXIO on bad input.
+ */
+static int update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab,
+ u64 act_ib, u64 slp_ib, u64 slp_ab)
+{
+ int ret = 0;
+
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+ if (!cl) {
+ MSM_BUS_ERR("Invalid client handle %p", cl);
+ ret = -ENXIO;
+ goto exit_change_context;
+ }
+
+ if ((cl->cur_act_ib == act_ib) &&
+ (cl->cur_act_ab == act_ab) &&
+ (cl->cur_dual_ib == slp_ib) &&
+ (cl->cur_dual_ab == slp_ab)) {
+ MSM_BUS_ERR("No change in vote");
+ goto exit_change_context;
+ }
+
+ if (!slp_ab && !slp_ib)
+ cl->active_only = true;
+ msm_bus_dbg_rec_transaction(cl, cl->cur_act_ab, cl->cur_dual_ib);
+ /*
+ * Fix: the original passed cl->cur_act_ab for BOTH the cur_ib and
+ * cur_ab arguments of update_path(); the cur_ib slot must carry the
+ * previous active ib vote.
+ */
+ ret = update_path(cl->mas_dev, cl->slv, act_ib, act_ab, slp_ib,
+ slp_ab, cl->cur_act_ib, cl->cur_act_ab,
+ cl->first_hop, cl->active_only);
+ if (ret) {
+ MSM_BUS_ERR("%s: Update path failed! %d active_only %d\n",
+ __func__, ret, cl->active_only);
+ goto exit_change_context;
+ }
+ commit_data();
+ cl->cur_act_ib = act_ib;
+ cl->cur_act_ab = act_ab;
+ cl->cur_dual_ib = slp_ib;
+ cl->cur_dual_ab = slp_ab;
+ trace_bus_update_request_end(cl->name);
+exit_change_context:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+ return ret;
+}
+
+/*
+ * unregister_adhoc() - Tear down a client handle: zero its path votes,
+ * commit the change to hardware, and free the handle.
+ */
+static void unregister_adhoc(struct msm_bus_client_handle *cl)
+{
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+ if (!cl) {
+ MSM_BUS_ERR("%s: Null cl handle passed unregister\n",
+ __func__);
+ goto exit_unregister_client;
+ }
+
+ MSM_BUS_DBG("%s: Unregistering client %p", __func__, cl);
+
+ /* Zero out and release every lnode along this client's route. */
+ remove_path(cl->mas_dev, cl->slv, cl->cur_act_ib, cl->cur_act_ab,
+ cl->first_hop, cl->active_only);
+ commit_data();
+ msm_bus_dbg_remove_client(cl);
+ kfree(cl->name);
+ kfree(cl);
+exit_unregister_client:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+}
+
+/*
+ * register_adhoc() - Create a client handle for the (mas, slv) pair and
+ * discover the route between them.
+ *
+ * Returns the new handle on success, or NULL on any failure. The handle
+ * owns its name buffer; both are released by unregister_adhoc().
+ */
+static struct msm_bus_client_handle*
+register_adhoc(uint32_t mas, uint32_t slv, char *name, bool active_only)
+{
+ struct msm_bus_client_handle *client = NULL;
+ int len = 0;
+
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+
+ if (!(mas && slv && name)) {
+ pr_err("%s: Error: src dst name num_paths are required\n",
+ __func__);
+ goto exit_register;
+ }
+
+ client = kzalloc(sizeof(struct msm_bus_client_handle), GFP_KERNEL);
+ if (!client) {
+ MSM_BUS_ERR("%s: Error allocating client data", __func__);
+ goto exit_register;
+ }
+
+ len = strnlen(name, MAX_STR_CL);
+ client->name = kzalloc((len + 1), GFP_KERNEL);
+ if (!client->name) {
+ MSM_BUS_ERR("%s: Error allocating client name buf", __func__);
+ free_cl_mem(client);
+ /* Fix: original returned the freed pointer on this path. */
+ client = NULL;
+ goto exit_register;
+ }
+ /* Fix: bound the copy by the allocation size, not MAX_STR_CL. */
+ strlcpy(client->name, name, len + 1);
+ client->active_only = active_only;
+
+ client->mas = mas;
+ client->slv = slv;
+
+ client->mas_dev = bus_find_device(&msm_bus_type, NULL,
+ (void *) &mas,
+ msm_bus_device_match_adhoc);
+ if (IS_ERR_OR_NULL(client->mas_dev)) {
+ MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+ __func__, client->mas, client->slv);
+ free_cl_mem(client);
+ client = NULL;
+ goto exit_register;
+ }
+
+ client->first_hop = getpath(client->mas_dev, client->slv, client->name);
+ if (client->first_hop < 0) {
+ MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+ __func__, client->mas, client->slv);
+ free_cl_mem(client);
+ client = NULL;
+ goto exit_register;
+ }
+
+ MSM_BUS_DBG("%s:Client handle %p %s", __func__, client,
+ client->name);
+ msm_bus_dbg_add_client(client);
+exit_register:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+ return client;
+}
+/**
+ * msm_bus_arb_setops_adhoc() : Setup the bus arbitration ops
+ * @ arb_ops: pointer to the arb ops.
+ *
+ * Installs both the legacy pdata-based entry points and the
+ * per-path handle-based entry points implemented in this file.
+ */
+void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops)
+{
+ /* Legacy (scale_pdata / usecase index) interface. */
+ arb_ops->register_client = register_client_adhoc;
+ arb_ops->update_request = update_request_adhoc;
+ arb_ops->unregister_client = unregister_client_adhoc;
+ arb_ops->update_context = update_context;
+
+ /* Per-path client handle interface. */
+ arb_ops->register_cl = register_adhoc;
+ arb_ops->unregister = unregister_adhoc;
+ arb_ops->update_bw = update_bw_adhoc;
+ arb_ops->update_bw_context = update_bw_context;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
new file mode 100644
index 0000000..f78bcd4
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
@@ -0,0 +1,1976 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rtmutex.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+#include "msm_bus_rpmh.h"
+
+#define NUM_CL_HANDLES 50
+#define NUM_LNODES 3
+#define MAX_STR_CL 50
+
+#define MSM_BUS_MAS_ALC 144
+#define MSM_BUS_RSC_APPS 8000
+#define MSM_BUS_RSC_DISP 8001
+#define BCM_TCS_CMD_ACV_APPS 0x8
+
+/* One depth level of the breadth-first route search (see getpath()). */
+struct bus_search_type {
+ struct list_head link;
+ struct list_head node_list;
+};
+
+/* Table of legacy client pointers indexed by the opaque uint32 handle. */
+struct handle_type {
+ int num_entries;
+ struct msm_bus_client **cl_list;
+};
+
+static struct handle_type handle_list;
+/* Nodes with modified votes awaiting msm_bus_commit_data(). */
+static LIST_HEAD(commit_list);
+static LIST_HEAD(late_init_clist);
+static LIST_HEAD(query_list);
+
+/* Serializes all arbitration state in this file. */
+DEFINE_RT_MUTEX(msm_bus_adhoc_lock);
+
+/* Return true when node @id appears on @black_list. */
+static bool chk_bl_list(struct list_head *black_list, unsigned int id)
+{
+ struct msm_bus_node_device_type *node;
+
+ list_for_each_entry(node, black_list, link) {
+ if (node->node_info->id == id)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * copy_remaining_nodes() - Fold any nodes still on the edge/traverse lists
+ * into one final search level appended to @route_list, so prune_path() can
+ * reset their traversal flags and free the level.
+ */
+static void copy_remaining_nodes(struct list_head *edge_list, struct list_head
+ *traverse_list, struct list_head *route_list)
+{
+ struct bus_search_type *search_node;
+
+ if (list_empty(edge_list) && list_empty(traverse_list))
+ return;
+
+ search_node = kzalloc(sizeof(struct bus_search_type), GFP_KERNEL);
+ /* Fix: the original dereferenced the allocation unconditionally. */
+ if (!search_node)
+ return;
+ INIT_LIST_HEAD(&search_node->node_list);
+ list_splice_init(edge_list, traverse_list);
+ list_splice_init(traverse_list, &search_node->node_list);
+ list_add_tail(&search_node->link, route_list);
+}
+
+/*
+ * Duplicate instantiation from msm_bus_arb.c. Todo there needs to be a
+ * "util" file for these common func/macros.
+ *
+ */
+/*
+ * msm_bus_div64() - 64-bit divide with fast paths for power-of-two bases.
+ * Any non-zero numerator smaller than the base rounds up to 1.
+ */
+uint64_t msm_bus_div64(uint64_t num, unsigned int base)
+{
+ uint64_t *n = &num;
+
+ if ((num > 0) && (num < base))
+ return 1;
+
+ switch (base) {
+ case 0:
+ WARN(1, "AXI: Divide by 0 attempted\n");
+ /* fallthrough: treat divide-by-zero as divide-by-one */
+ case 1: return num;
+ case 2: return (num >> 1);
+ case 4: return (num >> 2);
+ case 8: return (num >> 3);
+ case 16: return (num >> 4);
+ case 32: return (num >> 5);
+ }
+
+ /* do_div() divides *n in place and leaves the quotient there. */
+ do_div(*n, base);
+ return *n;
+}
+
+/*
+ * bus_find_device() match callback: true when the bus node's id equals
+ * the id pointed to by @id.
+ */
+int msm_bus_device_match_adhoc(struct device *dev, void *id)
+{
+ struct msm_bus_node_device_type *bnode = to_msm_bus_node(dev);
+
+ return bnode ? (bnode->node_info->id == *(unsigned int *)id) : 0;
+}
+
+/*
+ * bcm_add_bus_req() - Ensure the bus node @dev holds a link node slot on
+ * every BCM (bus clock manager) it belongs to, allocating or growing the
+ * BCM's lnode_list as needed. Records the chosen slot in bcm_req_idx[].
+ */
+static void bcm_add_bus_req(struct device *dev)
+{
+ struct msm_bus_node_device_type *cur_dev = NULL;
+ struct msm_bus_node_device_type *bcm_dev = NULL;
+ struct link_node *lnode;
+ int lnode_idx = -1;
+ int max_num_lnodes = 0;
+ int i;
+
+ cur_dev = to_msm_bus_node(dev);
+ if (!cur_dev) {
+ MSM_BUS_ERR("%s: Null device ptr", __func__);
+ goto exit_bcm_add_bus_req;
+ }
+
+ if (!cur_dev->node_info->num_bcm_devs)
+ goto exit_bcm_add_bus_req;
+
+ for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+ /* Slot already assigned for this BCM; nothing to do. */
+ if (cur_dev->node_info->bcm_req_idx[i] != -1)
+ continue;
+ bcm_dev = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+ max_num_lnodes = bcm_dev->bcmdev->num_bus_devs;
+ if (!bcm_dev->num_lnodes) {
+ /* First user of this BCM: allocate its slot array. */
+ bcm_dev->lnode_list = devm_kzalloc(dev,
+ sizeof(struct link_node) * max_num_lnodes,
+ GFP_KERNEL);
+ if (!bcm_dev->lnode_list)
+ goto exit_bcm_add_bus_req;
+
+ lnode = bcm_dev->lnode_list;
+ bcm_dev->num_lnodes = max_num_lnodes;
+ lnode_idx = 0;
+ } else {
+ /* Inner i intentionally shadows the outer loop index;
+ * it only scans this BCM's slot array.
+ */
+ int i;
+
+ for (i = 0; i < bcm_dev->num_lnodes; i++) {
+ if (!bcm_dev->lnode_list[i].in_use)
+ break;
+ }
+
+ if (i < bcm_dev->num_lnodes) {
+ lnode = &bcm_dev->lnode_list[i];
+ lnode_idx = i;
+ } else {
+ /* No free slot: grow the array by NUM_LNODES
+ * and take the first new entry.
+ */
+ struct link_node *realloc_list;
+ size_t cur_size = sizeof(struct link_node) *
+ bcm_dev->num_lnodes;
+
+ bcm_dev->num_lnodes += NUM_LNODES;
+ realloc_list = msm_bus_realloc_devmem(
+ dev,
+ bcm_dev->lnode_list,
+ cur_size,
+ sizeof(struct link_node) *
+ bcm_dev->num_lnodes,
+ GFP_KERNEL);
+
+ if (!realloc_list)
+ goto exit_bcm_add_bus_req;
+
+ bcm_dev->lnode_list = realloc_list;
+ lnode = &bcm_dev->lnode_list[i];
+ lnode_idx = i;
+ }
+ }
+
+ lnode->in_use = 1;
+ lnode->bus_dev_id = cur_dev->node_info->id;
+ cur_dev->node_info->bcm_req_idx[i] = lnode_idx;
+ memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX);
+ memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX);
+ }
+
+exit_bcm_add_bus_req:
+ return;
+}
+
+/*
+ * gen_lnode() - Claim a link-node slot on @dev and chain it toward
+ * @next_hop (the previously generated slot index @prev_idx on the next
+ * device). Returns the slot index, or -1 on failure.
+ */
+static int gen_lnode(struct device *dev,
+ int next_hop, int prev_idx, const char *cl_name)
+{
+ struct link_node *lnode;
+ struct msm_bus_node_device_type *cur_dev = NULL;
+ int lnode_idx = -1;
+
+ if (!dev)
+ goto exit_gen_lnode;
+
+ cur_dev = to_msm_bus_node(dev);
+ if (!cur_dev) {
+ MSM_BUS_ERR("%s: Null device ptr", __func__);
+ goto exit_gen_lnode;
+ }
+
+ if (!cur_dev->num_lnodes) {
+ /* First client on this node: allocate the slot array. */
+ cur_dev->lnode_list = devm_kzalloc(dev,
+ sizeof(struct link_node) * NUM_LNODES,
+ GFP_KERNEL);
+ if (!cur_dev->lnode_list)
+ goto exit_gen_lnode;
+
+ lnode = cur_dev->lnode_list;
+ cur_dev->num_lnodes = NUM_LNODES;
+ lnode_idx = 0;
+ } else {
+ int i;
+
+ /* Reuse the first free slot, or grow the array. */
+ for (i = 0; i < cur_dev->num_lnodes; i++) {
+ if (!cur_dev->lnode_list[i].in_use)
+ break;
+ }
+
+ if (i < cur_dev->num_lnodes) {
+ lnode = &cur_dev->lnode_list[i];
+ lnode_idx = i;
+ } else {
+ struct link_node *realloc_list;
+ size_t cur_size = sizeof(struct link_node) *
+ cur_dev->num_lnodes;
+
+ cur_dev->num_lnodes += NUM_LNODES;
+ realloc_list = msm_bus_realloc_devmem(
+ dev,
+ cur_dev->lnode_list,
+ cur_size,
+ sizeof(struct link_node) *
+ cur_dev->num_lnodes, GFP_KERNEL);
+
+ if (!realloc_list)
+ goto exit_gen_lnode;
+
+ cur_dev->lnode_list = realloc_list;
+ lnode = &cur_dev->lnode_list[i];
+ lnode_idx = i;
+ }
+ }
+
+ lnode->in_use = 1;
+ lnode->cl_name = cl_name;
+ /* next_hop equal to our own id marks the end of the route. */
+ if (next_hop == cur_dev->node_info->id) {
+ lnode->next = -1;
+ lnode->next_dev = NULL;
+ } else {
+ lnode->next = prev_idx;
+ lnode->next_dev = bus_find_device(&msm_bus_type, NULL,
+ (void *) &next_hop,
+ msm_bus_device_match_adhoc);
+ }
+
+ memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX);
+ memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX);
+
+exit_gen_lnode:
+ return lnode_idx;
+}
+
+/*
+ * remove_lnode() - Release link-node slot @lnode_idx on @cur_dev.
+ * An index of -1 is a no-op. Returns 0 or -ENODEV.
+ */
+static int remove_lnode(struct msm_bus_node_device_type *cur_dev,
+ int lnode_idx)
+{
+ struct link_node *lnode;
+
+ if (!cur_dev) {
+ MSM_BUS_ERR("%s: Null device ptr", __func__);
+ return -ENODEV;
+ }
+
+ if (lnode_idx == -1)
+ return 0;
+
+ if (!cur_dev->num_lnodes ||
+ (lnode_idx > (cur_dev->num_lnodes - 1))) {
+ MSM_BUS_ERR("%s: Invalid Idx %d, num_lnodes %d",
+ __func__, lnode_idx, cur_dev->num_lnodes);
+ return -ENODEV;
+ }
+
+ lnode = &cur_dev->lnode_list[lnode_idx];
+ lnode->next = -1;
+ lnode->next_dev = NULL;
+ lnode->in_use = 0;
+ lnode->cl_name = NULL;
+
+ return 0;
+}
+
+/*
+ * prune_path() - Walk @route_list backwards from @dest, generating link
+ * nodes only along the discovered route, then free every search level and
+ * clear traversal state. Returns the first-hop lnode index, or -1.
+ */
+static int prune_path(struct list_head *route_list, int dest, int src,
+ struct list_head *black_list, int found,
+ const char *cl_name)
+{
+ struct bus_search_type *search_node, *temp_search_node;
+ struct msm_bus_node_device_type *bus_node;
+ struct list_head *bl_list;
+ struct list_head *temp_bl_list;
+ int search_dev_id = dest;
+ struct device *dest_dev = bus_find_device(&msm_bus_type, NULL,
+ (void *) &dest,
+ msm_bus_device_match_adhoc);
+ int lnode_hop = -1;
+
+ /* Search failed: just clean up the lists. */
+ if (!found)
+ goto reset_links;
+
+ if (!dest_dev) {
+ MSM_BUS_ERR("%s: Can't find dest dev %d", __func__, dest);
+ goto exit_prune_path;
+ }
+
+ /* Terminal hop for the destination itself. */
+ lnode_hop = gen_lnode(dest_dev, search_dev_id, lnode_hop, cl_name);
+ bcm_add_bus_req(dest_dev);
+
+ /* From the deepest level back to the source, link each node that
+ * connects to the hop found so far.
+ */
+ list_for_each_entry_reverse(search_node, route_list, link) {
+ list_for_each_entry(bus_node, &search_node->node_list, link) {
+ unsigned int i;
+
+ for (i = 0; i < bus_node->node_info->num_connections;
+ i++) {
+ if (bus_node->node_info->connections[i] ==
+ search_dev_id) {
+ dest_dev = bus_find_device(
+ &msm_bus_type,
+ NULL,
+ (void *)
+ &bus_node->node_info->id,
+ msm_bus_device_match_adhoc);
+
+ if (!dest_dev) {
+ lnode_hop = -1;
+ goto reset_links;
+ }
+
+ lnode_hop = gen_lnode(dest_dev,
+ search_dev_id,
+ lnode_hop, cl_name);
+ bcm_add_bus_req(dest_dev);
+ search_dev_id =
+ bus_node->node_info->id;
+ break;
+ }
+ }
+ }
+ }
+reset_links:
+ /* Clear is_traversed on every visited node and free each level. */
+ list_for_each_entry_safe(search_node, temp_search_node, route_list,
+ link) {
+ list_for_each_entry(bus_node, &search_node->node_list, link)
+ bus_node->node_info->is_traversed = false;
+
+ list_del(&search_node->link);
+ kfree(search_node);
+ }
+
+ /* Black-list entries are borrowed nodes; unlink only, do not free. */
+ list_for_each_safe(bl_list, temp_bl_list, black_list)
+ list_del(bl_list);
+
+exit_prune_path:
+ return lnode_hop;
+}
+
+/* Queue every black-listed connection of @node onto @black_list. */
+static void setup_bl_list(struct msm_bus_node_device_type *node,
+ struct list_head *black_list)
+{
+ unsigned int idx;
+
+ for (idx = 0; idx < node->node_info->num_blist; idx++) {
+ struct msm_bus_node_device_type *blocked;
+
+ blocked = to_msm_bus_node(
+ node->node_info->black_connections[idx]);
+ list_add_tail(&blocked->link, black_list);
+ }
+}
+
+/*
+ * getpath() - Breadth-first search from @src_dev to node id @dest,
+ * honouring per-node black lists. On success the route's link nodes are
+ * generated and the first-hop lnode index is returned; -1 on failure.
+ */
+static int getpath(struct device *src_dev, int dest, const char *cl_name)
+{
+ struct list_head traverse_list;
+ struct list_head edge_list;
+ struct list_head route_list;
+ struct list_head black_list;
+ struct msm_bus_node_device_type *src_node;
+ struct bus_search_type *search_node;
+ int found = 0;
+ int depth_index = 0;
+ int first_hop = -1;
+ int src;
+
+ INIT_LIST_HEAD(&traverse_list);
+ INIT_LIST_HEAD(&edge_list);
+ INIT_LIST_HEAD(&route_list);
+ INIT_LIST_HEAD(&black_list);
+
+ if (!src_dev) {
+ MSM_BUS_ERR("%s: Cannot locate src dev ", __func__);
+ goto exit_getpath;
+ }
+
+ src_node = to_msm_bus_node(src_dev);
+ if (!src_node) {
+ MSM_BUS_ERR("%s:Fatal, Source node not found", __func__);
+ goto exit_getpath;
+ }
+ src = src_node->node_info->id;
+ list_add_tail(&src_node->link, &traverse_list);
+
+ while ((!found && !list_empty(&traverse_list))) {
+ struct msm_bus_node_device_type *bus_node = NULL;
+ unsigned int i;
+ /* Locate dest_id in the traverse list */
+ list_for_each_entry(bus_node, &traverse_list, link) {
+ if (bus_node->node_info->id == dest) {
+ found = 1;
+ break;
+ }
+ }
+
+ /* Setup the new edge list */
+ list_for_each_entry(bus_node, &traverse_list, link) {
+ /* Setup list of black-listed nodes */
+ setup_bl_list(bus_node, &black_list);
+
+ for (i = 0; i < bus_node->node_info->num_connections;
+ i++) {
+ bool skip;
+ struct msm_bus_node_device_type
+ *node_conn;
+ node_conn =
+ to_msm_bus_node(
+ bus_node->node_info->dev_connections[i]);
+ if (node_conn->node_info->is_traversed) {
+ MSM_BUS_ERR("Circ Path %d\n",
+ node_conn->node_info->id);
+ goto reset_traversed;
+ }
+ skip = chk_bl_list(&black_list,
+ bus_node->node_info->connections[i]);
+ if (!skip) {
+ list_add_tail(&node_conn->link,
+ &edge_list);
+ node_conn->node_info->is_traversed =
+ true;
+ }
+ }
+ }
+ /* Keep tabs of the previous search list */
+ search_node = kzalloc(sizeof(struct bus_search_type),
+ GFP_KERNEL);
+ /*
+ * Fix: the original dereferenced the allocation without a
+ * check. On failure, fail the search; the cleanup path
+ * below resets traversal state and frees the levels.
+ */
+ if (!search_node) {
+ found = 0;
+ goto reset_traversed;
+ }
+ INIT_LIST_HEAD(&search_node->node_list);
+ list_splice_init(&traverse_list,
+ &search_node->node_list);
+ /* Add the previous search list to a route list */
+ list_add_tail(&search_node->link, &route_list);
+ /* Advancing the list depth */
+ depth_index++;
+ list_splice_init(&edge_list, &traverse_list);
+ }
+reset_traversed:
+ copy_remaining_nodes(&edge_list, &traverse_list, &route_list);
+ first_hop = prune_path(&route_list, dest, src, &black_list, found,
+ cl_name);
+
+exit_getpath:
+ return first_hop;
+}
+
+/*
+ * bcm_update_acv_req() - Fold the ACV (apps clock vote) bit for @cur_rsc
+ * into *acv and mirror the result into the TCS vote pair (vec_a, vec_b).
+ */
+static void bcm_update_acv_req(struct msm_bus_node_device_type *cur_rsc,
+ uint64_t max_ab, uint64_t max_ib,
+ uint64_t *vec_a, uint64_t *vec_b,
+ uint32_t *acv, int ctx)
+{
+ /*
+ * Base ACV voting on current RSC until mapping is set up in commanddb
+ * that allows us to vote ACV based on master.
+ */
+ uint32_t acv_bmsk = (cur_rsc->node_info->id == MSM_BUS_RSC_APPS) ?
+ BCM_TCS_CMD_ACV_APPS : 0;
+
+ if (max_ab == 0 && max_ib == 0)
+ *acv &= ~acv_bmsk;
+ else
+ *acv |= acv_bmsk;
+
+ *vec_a = 0;
+ *vec_b = *acv;
+}
+
+/*
+ * bcm_update_bus_req() - Propagate @dev's aggregated (ab, ib) for context
+ * @ctx into each of its BCMs: scale by BCM width over bus width, take the
+ * max/sum across all of the BCM's link nodes, normalize by the BCM unit
+ * size, and store the result as the TCS vote pair (vec_a, vec_b).
+ */
+static void bcm_update_bus_req(struct device *dev, int ctx)
+{
+ struct msm_bus_node_device_type *cur_dev = NULL;
+ struct msm_bus_node_device_type *bcm_dev = NULL;
+ struct msm_bus_node_device_type *cur_rsc = NULL;
+
+ int i, j;
+ uint64_t max_ib = 0;
+ uint64_t max_ab = 0;
+ int lnode_idx = 0;
+
+ cur_dev = to_msm_bus_node(dev);
+ if (!cur_dev) {
+ MSM_BUS_ERR("%s: Null device ptr", __func__);
+ goto exit_bcm_update_bus_req;
+ }
+
+ if (!cur_dev->node_info->num_bcm_devs)
+ goto exit_bcm_update_bus_req;
+
+ for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+ bcm_dev = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+
+ if (!bcm_dev)
+ goto exit_bcm_update_bus_req;
+
+ /* Scale this node's vote into the BCM's bandwidth units. */
+ lnode_idx = cur_dev->node_info->bcm_req_idx[i];
+ bcm_dev->lnode_list[lnode_idx].lnode_ib[ctx] =
+ msm_bus_div64(cur_dev->node_bw[ctx].max_ib *
+ (uint64_t)bcm_dev->bcmdev->width,
+ cur_dev->node_info->agg_params.buswidth);
+
+ bcm_dev->lnode_list[lnode_idx].lnode_ab[ctx] =
+ msm_bus_div64(cur_dev->node_bw[ctx].sum_ab *
+ (uint64_t)bcm_dev->bcmdev->width,
+ cur_dev->node_info->agg_params.buswidth *
+ cur_dev->node_info->agg_params.num_aggports);
+
+ /* Aggregate across all clients of this BCM. The active
+ * context folds in the dual-context votes as well.
+ */
+ for (j = 0; j < bcm_dev->num_lnodes; j++) {
+ if (ctx == ACTIVE_CTX) {
+ max_ib = max(max_ib,
+ max(bcm_dev->lnode_list[j].lnode_ib[ACTIVE_CTX],
+ bcm_dev->lnode_list[j].lnode_ib[DUAL_CTX]));
+ max_ab = max(max_ab,
+ bcm_dev->lnode_list[j].lnode_ab[ACTIVE_CTX] +
+ bcm_dev->lnode_list[j].lnode_ab[DUAL_CTX]);
+ } else {
+ max_ib = max(max_ib,
+ bcm_dev->lnode_list[j].lnode_ib[ctx]);
+ max_ab = max(max_ab,
+ bcm_dev->lnode_list[j].lnode_ab[ctx]);
+ }
+ }
+ bcm_dev->node_bw[ctx].max_ab = max_ab;
+ bcm_dev->node_bw[ctx].max_ib = max_ib;
+
+ max_ab = msm_bus_div64(max_ab, bcm_dev->bcmdev->unit_size);
+ max_ib = msm_bus_div64(max_ib, bcm_dev->bcmdev->unit_size);
+
+ /* The ACV BCM votes a bitmask, not bandwidth values. */
+ if (bcm_dev->node_info->id == MSM_BUS_BCM_ACV) {
+ cur_rsc =
+ to_msm_bus_node(bcm_dev->node_info->rsc_devs[0]);
+ bcm_update_acv_req(cur_rsc, max_ab, max_ib,
+ &bcm_dev->node_vec[ctx].vec_a,
+ &bcm_dev->node_vec[ctx].vec_b,
+ &cur_rsc->rscdev->acv[ctx], ctx);
+
+ } else {
+ bcm_dev->node_vec[ctx].vec_a = max_ab;
+ bcm_dev->node_vec[ctx].vec_b = max_ib;
+ }
+ }
+exit_bcm_update_bus_req:
+ return;
+}
+
+/*
+ * bcm_query_bus_req() - Query-only twin of bcm_update_bus_req(): performs
+ * the same scaling and aggregation but reads/writes the query_* fields so
+ * no committed vote state is disturbed.
+ */
+static void bcm_query_bus_req(struct device *dev, int ctx)
+{
+ struct msm_bus_node_device_type *cur_dev = NULL;
+ struct msm_bus_node_device_type *bcm_dev = NULL;
+ struct msm_bus_node_device_type *cur_rsc = NULL;
+ int i, j;
+ uint64_t max_query_ib = 0;
+ uint64_t max_query_ab = 0;
+ int lnode_idx = 0;
+
+ cur_dev = to_msm_bus_node(dev);
+ if (!cur_dev) {
+ MSM_BUS_ERR("%s: Null device ptr", __func__);
+ goto exit_bcm_query_bus_req;
+ }
+
+ if (!cur_dev->node_info->num_bcm_devs)
+ goto exit_bcm_query_bus_req;
+
+ for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+ bcm_dev = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+
+ if (!bcm_dev)
+ goto exit_bcm_query_bus_req;
+
+ /* Scale this node's queried vote into BCM bandwidth units. */
+ lnode_idx = cur_dev->node_info->bcm_req_idx[i];
+ bcm_dev->lnode_list[lnode_idx].query_ib[ctx] =
+ msm_bus_div64(cur_dev->node_bw[ctx].max_query_ib *
+ (uint64_t)bcm_dev->bcmdev->width,
+ cur_dev->node_info->agg_params.buswidth);
+
+ bcm_dev->lnode_list[lnode_idx].query_ab[ctx] =
+ msm_bus_div64(cur_dev->node_bw[ctx].sum_query_ab *
+ (uint64_t)bcm_dev->bcmdev->width,
+ cur_dev->node_info->agg_params.num_aggports *
+ cur_dev->node_info->agg_params.buswidth);
+
+ /* Aggregate across all clients; the active context folds in
+ * the dual-context votes as well.
+ */
+ for (j = 0; j < bcm_dev->num_lnodes; j++) {
+ if (ctx == ACTIVE_CTX) {
+ max_query_ib = max(max_query_ib,
+ max(bcm_dev->lnode_list[j].query_ib[ACTIVE_CTX],
+ bcm_dev->lnode_list[j].query_ib[DUAL_CTX]));
+
+ max_query_ab = max(max_query_ab,
+ bcm_dev->lnode_list[j].query_ab[ACTIVE_CTX] +
+ bcm_dev->lnode_list[j].query_ab[DUAL_CTX]);
+ } else {
+ max_query_ib = max(max_query_ib,
+ bcm_dev->lnode_list[j].query_ib[ctx]);
+ max_query_ab = max(max_query_ab,
+ bcm_dev->lnode_list[j].query_ab[ctx]);
+ }
+ }
+
+ max_query_ab = msm_bus_div64(max_query_ab,
+ bcm_dev->bcmdev->unit_size);
+ max_query_ib = msm_bus_div64(max_query_ib,
+ bcm_dev->bcmdev->unit_size);
+
+ /* The ACV BCM votes a bitmask, not bandwidth values. */
+ if (bcm_dev->node_info->id == MSM_BUS_BCM_ACV) {
+ cur_rsc =
+ to_msm_bus_node(bcm_dev->node_info->rsc_devs[0]);
+ bcm_update_acv_req(cur_rsc, max_query_ab, max_query_ib,
+ &bcm_dev->node_vec[ctx].query_vec_a,
+ &bcm_dev->node_vec[ctx].query_vec_b,
+ &cur_rsc->rscdev->query_acv[ctx], ctx);
+ } else {
+ bcm_dev->node_vec[ctx].query_vec_a = max_query_ab;
+ bcm_dev->node_vec[ctx].query_vec_b = max_query_ib;
+ }
+
+ bcm_dev->node_bw[ctx].max_query_ab = max_query_ab;
+ bcm_dev->node_bw[ctx].max_query_ib = max_query_ib;
+ }
+exit_bcm_query_bus_req:
+ return;
+}
+
+/*
+ * bcm_update_alc_req() - Aggregate the ALC (latency) index across @dev's
+ * link nodes for context @ctx and vote it on the node's first BCM
+ * (vec_a = index, vec_b = 0). Active context folds in the dual vote.
+ */
+static void bcm_update_alc_req(struct msm_bus_node_device_type *dev, int ctx)
+{
+ struct msm_bus_node_device_type *bcm_dev = NULL;
+ int i;
+ uint64_t max_alc = 0;
+
+ if (!dev || !to_msm_bus_node(dev->node_info->bus_device)) {
+ MSM_BUS_ERR("Bus node pointer is Invalid");
+ goto exit_bcm_update_alc_req;
+ }
+
+ for (i = 0; i < dev->num_lnodes; i++)
+ max_alc = max(max_alc, dev->lnode_list[i].alc_idx[ctx]);
+
+ dev->node_bw[ctx].max_alc = max_alc;
+
+ /* assumes the ALC node has exactly one BCM at index 0 */
+ bcm_dev = to_msm_bus_node(dev->node_info->bcm_devs[0]);
+
+ if (ctx == ACTIVE_CTX) {
+ max_alc = max(max_alc,
+ max(dev->node_bw[ACTIVE_CTX].max_alc,
+ dev->node_bw[DUAL_CTX].max_alc));
+ } else {
+ max_alc = dev->node_bw[ctx].max_alc;
+ }
+
+ bcm_dev->node_bw[ctx].max_alc = max_alc;
+ bcm_dev->node_vec[ctx].vec_a = max_alc;
+ bcm_dev->node_vec[ctx].vec_b = 0;
+
+exit_bcm_update_alc_req:
+ return;
+}
+
+/*
+ * bcm_remove_handoff_req() - bus_for_each_dev() callback queueing ordinary
+ * bus nodes (not BCM/fabric/RSC nodes, and only those under the APPS RSC)
+ * onto late_init_clist so bootloader handoff votes can be dropped at late
+ * init. @data is unused. Always returns 0.
+ */
+int bcm_remove_handoff_req(struct device *dev, void *data)
+{
+ struct msm_bus_node_device_type *bus_dev = NULL;
+ struct msm_bus_node_device_type *cur_bcm = NULL;
+ struct msm_bus_node_device_type *cur_rsc = NULL;
+ int ret = 0;
+
+ bus_dev = to_msm_bus_node(dev);
+ if (bus_dev->node_info->is_bcm_dev ||
+ bus_dev->node_info->is_fab_dev ||
+ bus_dev->node_info->is_rsc_dev)
+ goto exit_bcm_remove_handoff_req;
+
+ if (bus_dev->node_info->num_bcm_devs) {
+ cur_bcm = to_msm_bus_node(bus_dev->node_info->bcm_devs[0]);
+ if (cur_bcm->node_info->num_rsc_devs) {
+ cur_rsc =
+ to_msm_bus_node(cur_bcm->node_info->rsc_devs[0]);
+ /* Only the APPS RSC holds handoff votes to drop. */
+ if (cur_rsc->node_info->id != MSM_BUS_RSC_APPS)
+ goto exit_bcm_remove_handoff_req;
+ }
+ }
+
+ if (!bus_dev->dirty) {
+ list_add_tail(&bus_dev->link, &late_init_clist);
+ bus_dev->dirty = true;
+ }
+
+exit_bcm_remove_handoff_req:
+ return ret;
+}
+
+/*
+ * aggregate_bus_req() - Aggregate committed votes across @bus_dev's link
+ * nodes for context @ctx: IB is the max, AB is the sum.
+ */
+static void aggregate_bus_req(struct msm_bus_node_device_type *bus_dev,
+ int ctx)
+{
+ int idx;
+ uint64_t ib_hi = 0;
+ uint64_t ab_total = 0;
+
+ if (!bus_dev || !to_msm_bus_node(bus_dev->node_info->bus_device)) {
+ MSM_BUS_ERR("Bus node pointer is Invalid");
+ return;
+ }
+
+ for (idx = 0; idx < bus_dev->num_lnodes; idx++) {
+ struct link_node *lnode = &bus_dev->lnode_list[idx];
+
+ ib_hi = max(ib_hi, lnode->lnode_ib[ctx]);
+ ab_total += lnode->lnode_ab[ctx];
+ }
+
+ bus_dev->node_bw[ctx].sum_ab = ab_total;
+ bus_dev->node_bw[ctx].max_ib = ib_hi;
+}
+
+/*
+ * aggregate_bus_query_req() - Query-only twin of aggregate_bus_req():
+ * aggregates the query_* votes (IB max, AB sum) without touching
+ * committed state.
+ */
+static void aggregate_bus_query_req(struct msm_bus_node_device_type *bus_dev,
+ int ctx)
+{
+ int idx;
+ uint64_t ib_hi = 0;
+ uint64_t ab_total = 0;
+
+ if (!bus_dev || !to_msm_bus_node(bus_dev->node_info->bus_device)) {
+ MSM_BUS_ERR("Bus node pointer is Invalid");
+ return;
+ }
+
+ for (idx = 0; idx < bus_dev->num_lnodes; idx++) {
+ struct link_node *lnode = &bus_dev->lnode_list[idx];
+
+ ib_hi = max(ib_hi, lnode->query_ib[ctx]);
+ ab_total += lnode->query_ab[ctx];
+ }
+
+ bus_dev->node_bw[ctx].sum_query_ab = ab_total;
+ bus_dev->node_bw[ctx].max_query_ib = ib_hi;
+}
+
+/*
+ * commit_data() - Flush all queued node votes to hardware and reset the
+ * commit list (msm_bus_commit_data() consumes the entries).
+ */
+static void commit_data(void)
+{
+ msm_bus_commit_data(&commit_list);
+ INIT_LIST_HEAD(&commit_list);
+}
+
+/*
+ * commit_late_init_data() - Two-phase late-init commit. Called with
+ * lock=true it only TAKES msm_bus_adhoc_lock and returns; the later
+ * lock=false call performs the handoff-vote removal and RELEASES the
+ * lock. The asymmetric locking is deliberate: the lock is held across
+ * the window between the two calls.
+ */
+int commit_late_init_data(bool lock)
+{
+ int rc;
+
+ if (lock) {
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+ return 0;
+ }
+
+ rc = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+ bcm_remove_handoff_req);
+
+ msm_bus_commit_data(&late_init_clist);
+ INIT_LIST_HEAD(&late_init_clist);
+
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+ return rc;
+}
+
+
+
+/*
+ * add_node_to_clist() - Queue @node and its parent fabric for the next
+ * commit, each at most once (the dirty flag dedups).
+ */
+static void add_node_to_clist(struct msm_bus_node_device_type *node)
+{
+ struct msm_bus_node_device_type *parent =
+ to_msm_bus_node(node->node_info->bus_device);
+
+ if (!node->dirty) {
+ node->dirty = true;
+ list_add_tail(&node->link, &commit_list);
+ }
+
+ if (!parent->dirty) {
+ parent->dirty = true;
+ list_add_tail(&parent->link, &commit_list);
+ }
+}
+
+/* Queue @node for the next query pass, at most once (query_dirty dedups). */
+static void add_node_to_query_list(struct msm_bus_node_device_type *node)
+{
+ if (!node->query_dirty) {
+ list_add_tail(&node->query_link, &query_list);
+ node->query_dirty = true;
+ }
+}
+
+/*
+ * update_path() - Walk the lnode chain starting at slot @src_idx on
+ * @src_dev, store the new active/sleep votes in each hop's link node,
+ * re-aggregate both contexts, update the BCM votes, and queue each hop
+ * for commit. @cur_ib/@cur_bw and @ctx are currently unused here.
+ * Returns 0 or a negative errno.
+ */
+static int update_path(struct device *src_dev, int dest, uint64_t act_req_ib,
+ uint64_t act_req_bw, uint64_t slp_req_ib,
+ uint64_t slp_req_bw, uint64_t cur_ib, uint64_t cur_bw,
+ int src_idx, int ctx)
+{
+ struct device *next_dev = NULL;
+ struct link_node *lnode = NULL;
+ struct msm_bus_node_device_type *dev_info = NULL;
+ int curr_idx;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(src_dev)) {
+ MSM_BUS_ERR("%s: No source device", __func__);
+ ret = -ENODEV;
+ goto exit_update_path;
+ }
+
+ next_dev = src_dev;
+
+ if (src_idx < 0) {
+ MSM_BUS_ERR("%s: Invalid lnode idx %d", __func__, src_idx);
+ ret = -ENXIO;
+ goto exit_update_path;
+ }
+ curr_idx = src_idx;
+
+ /* Follow next_dev/next pointers hop by hop until the chain ends. */
+ while (next_dev) {
+ int i;
+
+ dev_info = to_msm_bus_node(next_dev);
+
+ if (curr_idx >= dev_info->num_lnodes) {
+ MSM_BUS_ERR("%s: Invalid lnode Idx %d num lnodes %d",
+ __func__, curr_idx, dev_info->num_lnodes);
+ ret = -ENXIO;
+ goto exit_update_path;
+ }
+
+ lnode = &dev_info->lnode_list[curr_idx];
+ if (!lnode) {
+ MSM_BUS_ERR("%s: Invalid lnode ptr lnode %d",
+ __func__, curr_idx);
+ ret = -ENXIO;
+ goto exit_update_path;
+ }
+ lnode->lnode_ib[ACTIVE_CTX] = act_req_ib;
+ lnode->lnode_ab[ACTIVE_CTX] = act_req_bw;
+ lnode->lnode_ib[DUAL_CTX] = slp_req_ib;
+ lnode->lnode_ab[DUAL_CTX] = slp_req_bw;
+
+ for (i = 0; i < NUM_CTX; i++) {
+ aggregate_bus_req(dev_info, i);
+ bcm_update_bus_req(next_dev, i);
+ }
+
+ add_node_to_clist(dev_info);
+
+ next_dev = lnode->next_dev;
+ curr_idx = lnode->next;
+ }
+
+exit_update_path:
+ return ret;
+}
+
+/*
+ * update_alc_vote() - Set the ALC (latency) indices on the lnode at slot
+ * @idx of @alc_dev and refresh the BCM ALC vote for both contexts.
+ * The latency/idle-time parameters are accepted but currently unused:
+ * fixed indices are programmed until the mapping LUT is available.
+ * Returns 0 or a negative errno.
+ */
+static int update_alc_vote(struct device *alc_dev, uint64_t act_req_fa_lat,
+ uint64_t act_req_idle_time, uint64_t slp_req_fa_lat,
+ uint64_t slp_req_idle_time, uint64_t cur_fa_lat,
+ uint64_t cur_idle_time, int idx, int ctx)
+{
+ struct link_node *lnode = NULL;
+ struct msm_bus_node_device_type *dev_info = NULL;
+ int curr_idx, i;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(alc_dev)) {
+ MSM_BUS_ERR("%s: No source device", __func__);
+ ret = -ENODEV;
+ goto exit_update_alc_vote;
+ }
+
+ if (idx < 0) {
+ MSM_BUS_ERR("%s: Invalid lnode idx %d", __func__, idx);
+ ret = -ENXIO;
+ goto exit_update_alc_vote;
+ }
+
+ dev_info = to_msm_bus_node(alc_dev);
+ curr_idx = idx;
+
+ if (curr_idx >= dev_info->num_lnodes) {
+ MSM_BUS_ERR("%s: Invalid lnode Idx %d num lnodes %d",
+ __func__, curr_idx, dev_info->num_lnodes);
+ ret = -ENXIO;
+ goto exit_update_alc_vote;
+ }
+
+ lnode = &dev_info->lnode_list[curr_idx];
+ if (!lnode) {
+ MSM_BUS_ERR("%s: Invalid lnode ptr lnode %d",
+ __func__, curr_idx);
+ ret = -ENXIO;
+ goto exit_update_alc_vote;
+ }
+
+ /*
+ * Add aggregation and mapping logic once LUT is avail.
+ * Use default values for time being.
+ */
+ lnode->alc_idx[ACTIVE_CTX] = 12;
+ lnode->alc_idx[DUAL_CTX] = 0;
+
+ for (i = 0; i < NUM_CTX; i++)
+ bcm_update_alc_req(dev_info, i);
+
+ add_node_to_clist(dev_info);
+
+exit_update_alc_vote:
+ return ret;
+}
+
+
+/*
+ * query_path() - Query-only twin of update_path(): walks the same lnode
+ * chain but stores votes in the query_* fields and queues hops on the
+ * query list, leaving committed state untouched. Returns 0 or -errno.
+ */
+static int query_path(struct device *src_dev, int dest, uint64_t act_req_ib,
+ uint64_t act_req_bw, uint64_t slp_req_ib,
+ uint64_t slp_req_bw, uint64_t cur_ib, uint64_t cur_bw,
+ int src_idx)
+{
+ struct device *next_dev = NULL;
+ struct link_node *lnode = NULL;
+ struct msm_bus_node_device_type *dev_info = NULL;
+ int curr_idx;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(src_dev)) {
+ MSM_BUS_ERR("%s: No source device", __func__);
+ ret = -ENODEV;
+ goto exit_query_path;
+ }
+
+ next_dev = src_dev;
+
+ if (src_idx < 0) {
+ MSM_BUS_ERR("%s: Invalid lnode idx %d", __func__, src_idx);
+ ret = -ENXIO;
+ goto exit_query_path;
+ }
+ curr_idx = src_idx;
+
+ while (next_dev) {
+ int i;
+
+ dev_info = to_msm_bus_node(next_dev);
+
+ if (curr_idx >= dev_info->num_lnodes) {
+ MSM_BUS_ERR("%s: Invalid lnode Idx %d num lnodes %d",
+ __func__, curr_idx, dev_info->num_lnodes);
+ ret = -ENXIO;
+ goto exit_query_path;
+ }
+
+ lnode = &dev_info->lnode_list[curr_idx];
+ if (!lnode) {
+ MSM_BUS_ERR("%s: Invalid lnode ptr lnode %d",
+ __func__, curr_idx);
+ ret = -ENXIO;
+ goto exit_query_path;
+ }
+ lnode->query_ib[ACTIVE_CTX] = act_req_ib;
+ lnode->query_ab[ACTIVE_CTX] = act_req_bw;
+ lnode->query_ib[DUAL_CTX] = slp_req_ib;
+ lnode->query_ab[DUAL_CTX] = slp_req_bw;
+
+ for (i = 0; i < NUM_CTX; i++) {
+ aggregate_bus_query_req(dev_info, i);
+ bcm_query_bus_req(next_dev, i);
+ }
+
+ add_node_to_query_list(dev_info);
+
+ next_dev = lnode->next_dev;
+ curr_idx = lnode->next;
+ }
+
+exit_query_path:
+ return ret;
+}
+
+/*
+ * remove_path() - Zero out every vote along the lnode chain starting at
+ * slot @src_idx on @src_dev, then release each link node of the chain.
+ * Returns 0 or a negative errno.
+ */
+static int remove_path(struct device *src_dev, int dst, uint64_t cur_ib,
+ uint64_t cur_ab, int src_idx, int active_only)
+{
+ struct device *next_dev = NULL;
+ struct link_node *lnode = NULL;
+ struct msm_bus_node_device_type *dev_info = NULL;
+ int ret = 0;
+ int cur_idx = src_idx;
+ int next_idx;
+
+ /* Update the current path to zero out all request from
+ * this cient on all paths
+ */
+ if (!src_dev) {
+ MSM_BUS_ERR("%s: Can't find source device", __func__);
+ ret = -ENODEV;
+ goto exit_remove_path;
+ }
+
+ ret = update_path(src_dev, dst, 0, 0, 0, 0, cur_ib, cur_ab, src_idx,
+ active_only);
+ if (ret) {
+ MSM_BUS_ERR("%s: Error zeroing out path ctx %d",
+ __func__, ACTIVE_CTX);
+ goto exit_remove_path;
+ }
+
+ next_dev = src_dev;
+
+ /* Second pass: free each link node; save next before removal. */
+ while (next_dev) {
+ dev_info = to_msm_bus_node(next_dev);
+ lnode = &dev_info->lnode_list[cur_idx];
+ next_idx = lnode->next;
+ next_dev = lnode->next_dev;
+ remove_lnode(dev_info, cur_idx);
+ cur_idx = next_idx;
+ }
+
+exit_remove_path:
+ return ret;
+}
+
+/*
+ * getpath_debug() - Dump (via MSM_BUS_ERR) every hop of the route that
+ * starts at lnode slot @curr on source node id @src, including per-context
+ * clock/bandwidth figures and each hop's parent fabric. Debug aid only.
+ */
+static void getpath_debug(int src, int curr, int active_only)
+{
+ struct device *dev_node;
+ struct device *dev_it;
+ unsigned int hop = 1;
+ int idx;
+ struct msm_bus_node_device_type *devinfo;
+ int i;
+
+ dev_node = bus_find_device(&msm_bus_type, NULL,
+ (void *) &src,
+ msm_bus_device_match_adhoc);
+
+ if (!dev_node) {
+ MSM_BUS_ERR("SRC NOT FOUND %d", src);
+ return;
+ }
+
+ idx = curr;
+ devinfo = to_msm_bus_node(dev_node);
+ dev_it = dev_node;
+
+ MSM_BUS_ERR("Route list Src %d", src);
+ while (dev_it) {
+ /* busdev is captured before devinfo advances to the next
+ * hop, so the print below refers to the current hop.
+ */
+ struct msm_bus_node_device_type *busdev =
+ to_msm_bus_node(devinfo->node_info->bus_device);
+
+ MSM_BUS_ERR("Hop[%d] at Device %d ctx %d", hop,
+ devinfo->node_info->id, active_only);
+
+ for (i = 0; i < NUM_CTX; i++) {
+ MSM_BUS_ERR("dev info sel ib %llu",
+ devinfo->node_bw[i].cur_clk_hz);
+ MSM_BUS_ERR("dev info sel ab %llu",
+ devinfo->node_bw[i].sum_ab);
+ }
+
+ dev_it = devinfo->lnode_list[idx].next_dev;
+ idx = devinfo->lnode_list[idx].next;
+ if (dev_it)
+ devinfo = to_msm_bus_node(dev_it);
+
+ MSM_BUS_ERR("Bus Device %d", busdev->node_info->id);
+ MSM_BUS_ERR("Bus Clock %llu", busdev->clk[active_only].rate);
+
+ if (idx < 0)
+ break;
+ hop++;
+ }
+}
+
+/*
+ * unregister_client_adhoc() - Legacy unregister: tear down every path in
+ * the client's current usecase, commit, and release its bookkeeping.
+ */
+static void unregister_client_adhoc(uint32_t cl)
+{
+ int i;
+ struct msm_bus_scale_pdata *pdata;
+ int lnode, src, curr, dest;
+ uint64_t cur_clk, cur_bw;
+ struct msm_bus_client *client;
+ struct device *src_dev;
+
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+ if (!cl) {
+ MSM_BUS_ERR("%s: Null cl handle passed unregister\n",
+ __func__);
+ goto exit_unregister_client;
+ }
+
+ /*
+ * Fix: the original indexed cl_list without a bounds check and
+ * dereferenced the entry without a NULL check, so a stale or
+ * double-unregistered handle crashed here.
+ */
+ if (cl >= (uint32_t)handle_list.num_entries) {
+ MSM_BUS_ERR("%s: Invalid client handle %u\n", __func__, cl);
+ goto exit_unregister_client;
+ }
+
+ client = handle_list.cl_list[cl];
+ if (!client) {
+ MSM_BUS_ERR("%s: Client %u already unregistered\n",
+ __func__, cl);
+ goto exit_unregister_client;
+ }
+ pdata = client->pdata;
+ if (!pdata) {
+ MSM_BUS_ERR("%s: Null pdata passed to unregister\n",
+ __func__);
+ goto exit_unregister_client;
+ }
+
+ curr = client->curr;
+ if (curr >= pdata->num_usecases) {
+ MSM_BUS_ERR("Invalid index Defaulting curr to 0");
+ curr = 0;
+ }
+
+ /* Remove each path voted by the current usecase. */
+ for (i = 0; i < pdata->usecase->num_paths; i++) {
+ src = client->pdata->usecase[curr].vectors[i].src;
+ dest = client->pdata->usecase[curr].vectors[i].dst;
+
+ lnode = client->src_pnode[i];
+ src_dev = client->src_devs[i];
+ cur_clk = client->pdata->usecase[curr].vectors[i].ib;
+ cur_bw = client->pdata->usecase[curr].vectors[i].ab;
+ remove_path(src_dev, dest, cur_clk, cur_bw, lnode,
+ pdata->active_only);
+ }
+ commit_data();
+ msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl);
+ kfree(client->src_pnode);
+ kfree(client->src_devs);
+ kfree(client);
+ handle_list.cl_list[cl] = NULL;
+exit_unregister_client:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+}
+
+static int alloc_handle_lst(int size)
+{
+ int ret = 0;
+ struct msm_bus_client **t_cl_list;
+
+ if (!handle_list.num_entries) {
+ t_cl_list = kzalloc(sizeof(struct msm_bus_client *)
+ * NUM_CL_HANDLES, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(t_cl_list)) {
+ ret = -ENOMEM;
+ MSM_BUS_ERR("%s: Failed to allocate handles list",
+ __func__);
+ goto exit_alloc_handle_lst;
+ }
+ handle_list.cl_list = t_cl_list;
+ handle_list.num_entries += NUM_CL_HANDLES;
+ } else {
+ t_cl_list = krealloc(handle_list.cl_list,
+ sizeof(struct msm_bus_client *) *
+ (handle_list.num_entries + NUM_CL_HANDLES),
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(t_cl_list)) {
+ ret = -ENOMEM;
+ MSM_BUS_ERR("%s: Failed to allocate handles list",
+ __func__);
+ goto exit_alloc_handle_lst;
+ }
+
+ handle_list.cl_list = t_cl_list;
+ memset(&handle_list.cl_list[handle_list.num_entries], 0,
+ NUM_CL_HANDLES * sizeof(struct msm_bus_client *));
+ handle_list.num_entries += NUM_CL_HANDLES;
+ }
+exit_alloc_handle_lst:
+ return ret;
+}
+
+static uint32_t gen_handle(struct msm_bus_client *client)
+{
+ uint32_t handle = 0;
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < handle_list.num_entries; i++) {
+ if (i && !handle_list.cl_list[i]) {
+ handle = i;
+ break;
+ }
+ }
+
+ if (!handle) {
+ ret = alloc_handle_lst(NUM_CL_HANDLES);
+
+ if (ret) {
+ MSM_BUS_ERR("%s: Failed to allocate handle list",
+ __func__);
+ goto exit_gen_handle;
+ }
+ handle = i + 1;
+ }
+ handle_list.cl_list[handle] = client;
+exit_gen_handle:
+ return handle;
+}
+
+static uint32_t register_client_adhoc(struct msm_bus_scale_pdata *pdata)
+{
+ int src, dest;
+ int i;
+ struct msm_bus_client *client = NULL;
+ int *lnode;
+ struct device *dev;
+ uint32_t handle = 0;
+
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+ client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL);
+ if (!client) {
+ MSM_BUS_ERR("%s: Error allocating client data", __func__);
+ goto exit_register_client;
+ }
+ client->pdata = pdata;
+
+ if (pdata->alc) {
+ client->curr = -1;
+ lnode = kzalloc(sizeof(int), GFP_KERNEL);
+
+ if (ZERO_OR_NULL_PTR(lnode)) {
+ MSM_BUS_ERR("%s: Error allocating lnode!", __func__);
+ goto exit_lnode_malloc_fail;
+ }
+ client->src_pnode = lnode;
+
+ client->src_devs = kzalloc(sizeof(struct device *),
+ GFP_KERNEL);
+ if (IS_ERR_OR_NULL(client->src_devs)) {
+ MSM_BUS_ERR("%s: Error allocating src_dev!", __func__);
+ goto exit_src_dev_malloc_fail;
+ }
+ src = MSM_BUS_MAS_ALC;
+ dev = bus_find_device(&msm_bus_type, NULL,
+ (void *) &src,
+ msm_bus_device_match_adhoc);
+ if (IS_ERR_OR_NULL(dev)) {
+ MSM_BUS_ERR("%s:Failed to find alc device",
+ __func__);
+ goto exit_invalid_data;
+ }
+ gen_lnode(dev, MSM_BUS_MAS_ALC, 0, pdata->name);
+ bcm_add_bus_req(dev);
+
+ client->src_devs[0] = dev;
+
+ handle = gen_handle(client);
+ goto exit_register_client;
+ }
+
+ lnode = kcalloc(pdata->usecase->num_paths, sizeof(int), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(lnode)) {
+ MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__);
+ goto exit_lnode_malloc_fail;
+ }
+ client->src_pnode = lnode;
+
+ client->src_devs = kcalloc(pdata->usecase->num_paths,
+ sizeof(struct device *), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(client->src_devs)) {
+ MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__);
+ goto exit_src_dev_malloc_fail;
+ }
+ client->curr = -1;
+
+ for (i = 0; i < pdata->usecase->num_paths; i++) {
+ src = pdata->usecase->vectors[i].src;
+ dest = pdata->usecase->vectors[i].dst;
+
+ if ((src < 0) || (dest < 0) || (src == dest)) {
+ MSM_BUS_ERR("%s:Invalid src/dst.src %d dest %d",
+ __func__, src, dest);
+ goto exit_invalid_data;
+ }
+ dev = bus_find_device(&msm_bus_type, NULL,
+ (void *) &src,
+ msm_bus_device_match_adhoc);
+ if (IS_ERR_OR_NULL(dev)) {
+ MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+ __func__, src, dest);
+ goto exit_invalid_data;
+ }
+ client->src_devs[i] = dev;
+
+ MSM_BUS_ERR("%s:find path.src %d dest %d",
+ __func__, src, dest);
+
+ lnode[i] = getpath(dev, dest, client->pdata->name);
+ if (lnode[i] < 0) {
+ MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+ __func__, src, dest);
+ goto exit_invalid_data;
+ }
+ }
+
+ handle = gen_handle(client);
+ msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER,
+ handle);
+ MSM_BUS_ERR("%s:Client handle %d %s", __func__, handle,
+ client->pdata->name);
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+ return handle;
+exit_invalid_data:
+ kfree(client->src_devs);
+exit_src_dev_malloc_fail:
+ kfree(lnode);
+exit_lnode_malloc_fail:
+ kfree(client);
+exit_register_client:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+ return handle;
+}
+
/*
 * update_client_paths() - Apply the bandwidth vectors of usecase @idx to
 * every path held by @client, then commit the aggregated votes.
 * @client:   registered client whose vote is being changed.
 * @log_trns: when true, dump per-path debug state after each vote.
 * @idx:      index into pdata->usecase[] to apply.
 *
 * Returns 0 on success or a negative errno from update_path().
 * NOTE(review): client->curr is set to @idx before the votes are applied
 * and is not rolled back if update_path() fails midway -- confirm callers
 * tolerate that.
 */
static int update_client_paths(struct msm_bus_client *client, bool log_trns,
			unsigned int idx)
{
	int lnode, src, dest, cur_idx;
	uint64_t req_clk, req_bw, curr_clk, curr_bw, slp_clk, slp_bw;
	int i, ret = 0;
	struct msm_bus_scale_pdata *pdata;
	struct device *src_dev;

	if (!client) {
		MSM_BUS_ERR("Client handle Null");
		ret = -ENXIO;
		goto exit_update_client_paths;
	}

	pdata = client->pdata;
	if (!pdata) {
		MSM_BUS_ERR("Client pdata Null");
		ret = -ENXIO;
		goto exit_update_client_paths;
	}

	/* Remember the outgoing usecase so its old vote can be removed. */
	cur_idx = client->curr;
	client->curr = idx;
	for (i = 0; i < pdata->usecase->num_paths; i++) {
		src = pdata->usecase[idx].vectors[i].src;
		dest = pdata->usecase[idx].vectors[i].dst;

		lnode = client->src_pnode[i];
		src_dev = client->src_devs[i];
		req_clk = client->pdata->usecase[idx].vectors[i].ib;
		req_bw = client->pdata->usecase[idx].vectors[i].ab;
		/* First request ever: no previous vote to subtract. */
		if (cur_idx < 0) {
			curr_clk = 0;
			curr_bw = 0;
		} else {
			curr_clk =
				client->pdata->usecase[cur_idx].vectors[i].ib;
			curr_bw = client->pdata->usecase[cur_idx].vectors[i].ab;
			MSM_BUS_DBG("%s:ab: %llu ib: %llu\n", __func__,
					curr_bw, curr_clk);
		}

		/*
		 * Active-only clients vote nothing in the sleep (dual)
		 * context; otherwise the vote goes into the sleep set and
		 * the active set is zeroed.
		 */
		if (pdata->active_only) {
			slp_clk = 0;
			slp_bw = 0;
		} else {
			slp_clk = req_clk;
			slp_bw = req_bw;
			req_clk = 0;
			req_bw = 0;
		}

		ret = update_path(src_dev, dest, req_clk, req_bw, slp_clk,
			slp_bw, curr_clk, curr_bw, lnode, pdata->active_only);

		if (ret) {
			MSM_BUS_ERR("%s: Update path failed! %d ctx %d\n",
					__func__, ret, pdata->active_only);
			goto exit_update_client_paths;
		}

		if (log_trns)
			getpath_debug(src, lnode, pdata->active_only);
	}
	/* Push the new aggregated votes to hardware in one shot. */
	commit_data();
exit_update_client_paths:
	return ret;
}
+
+static int update_client_alc(struct msm_bus_client *client, bool log_trns,
+ unsigned int idx)
+{
+ int lnode, cur_idx;
+ uint64_t req_idle_time, req_fal, dual_idle_time, dual_fal,
+ cur_idle_time, cur_fal;
+ int ret = 0;
+ struct msm_bus_scale_pdata *pdata;
+ struct device *src_dev;
+
+ if (!client) {
+ MSM_BUS_ERR("Client handle Null");
+ ret = -ENXIO;
+ goto exit_update_client_alc;
+ }
+
+ pdata = client->pdata;
+ if (!pdata) {
+ MSM_BUS_ERR("Client pdata Null");
+ ret = -ENXIO;
+ goto exit_update_client_alc;
+ }
+
+ cur_idx = client->curr;
+ client->curr = idx;
+ req_fal = pdata->usecase_lat[idx].fal_ns;
+ req_idle_time = pdata->usecase_lat[idx].idle_t_ns;
+ lnode = client->src_pnode[0];
+ src_dev = client->src_devs[0];
+
+ if (pdata->active_only) {
+ dual_fal = 0;
+ dual_idle_time = 0;
+ } else {
+ dual_fal = req_fal;
+ dual_idle_time = req_idle_time;
+ }
+
+ ret = update_alc_vote(src_dev, req_fal, req_idle_time, dual_fal,
+ dual_idle_time, cur_fal, cur_idle_time, lnode,
+ pdata->active_only);
+
+ if (ret) {
+ MSM_BUS_ERR("%s: Update path failed! %d ctx %d\n",
+ __func__, ret, pdata->active_only);
+ goto exit_update_client_alc;
+ }
+ commit_data();
+exit_update_client_alc:
+ return ret;
+}
+
/*
 * query_usecase() - Compute the TCS command set that usecase @idx would
 * produce for @client, without committing anything to hardware.
 * @client:      registered client to query.
 * @log_trns:    unused in this function.
 * @idx:         usecase index to evaluate.
 * @tcs_usecase: output; filled by msm_bus_query_gen() from query_list.
 *
 * Runs a "what-if" pass: queries every path with the requested vectors,
 * generates the TCS data, then runs a second zero-vote pass and clears
 * the query-dirty markers so driver state is left untouched.
 *
 * Returns 0 on success or a negative errno from query_path().
 */
static int query_usecase(struct msm_bus_client *client, bool log_trns,
				unsigned int idx,
				struct msm_bus_tcs_usecase *tcs_usecase)
{
	int lnode, src, dest, cur_idx;
	uint64_t req_clk, req_bw, curr_clk, curr_bw;
	int i, ret = 0;
	struct msm_bus_scale_pdata *pdata;
	struct device *src_dev;
	struct msm_bus_node_device_type *node = NULL;
	struct msm_bus_node_device_type *node_tmp = NULL;

	if (!client) {
		MSM_BUS_ERR("Client handle Null");
		ret = -ENXIO;
		goto exit_query_usecase;
	}

	pdata = client->pdata;
	if (!pdata) {
		MSM_BUS_ERR("Client pdata Null");
		ret = -ENXIO;
		goto exit_query_usecase;
	}

	cur_idx = client->curr;
	client->curr = idx;
	/* Pass 1: vote the requested vectors into the query state. */
	for (i = 0; i < pdata->usecase->num_paths; i++) {
		src = pdata->usecase[idx].vectors[i].src;
		dest = pdata->usecase[idx].vectors[i].dst;

		lnode = client->src_pnode[i];
		src_dev = client->src_devs[i];
		req_clk = client->pdata->usecase[idx].vectors[i].ib;
		req_bw = client->pdata->usecase[idx].vectors[i].ab;
		if (cur_idx < 0) {
			curr_clk = 0;
			curr_bw = 0;
		} else {
			curr_clk =
				client->pdata->usecase[cur_idx].vectors[i].ib;
			curr_bw = client->pdata->usecase[cur_idx].vectors[i].ab;
			MSM_BUS_DBG("%s:ab: %llu ib: %llu\n", __func__,
					curr_bw, curr_clk);
		}

		ret = query_path(src_dev, dest, req_clk, req_bw, 0,
				0, curr_clk, curr_bw, lnode);

		if (ret) {
			MSM_BUS_ERR("%s: Query path failed! %d ctx %d\n",
					__func__, ret, pdata->active_only);
			goto exit_query_usecase;
		}
	}
	msm_bus_query_gen(&query_list, tcs_usecase);
	INIT_LIST_HEAD(&query_list);

	/* Pass 2: zero votes to undo the effect of pass 1. */
	for (i = 0; i < pdata->usecase->num_paths; i++) {
		src = pdata->usecase[idx].vectors[i].src;
		dest = pdata->usecase[idx].vectors[i].dst;

		lnode = client->src_pnode[i];
		src_dev = client->src_devs[i];

		/*
		 * NOTE(review): curr_clk/curr_bw here still hold the values
		 * from the LAST iteration of pass 1 (or 0 if cur_idx < 0),
		 * and the same pair is reused for every path -- confirm
		 * this is what query_path() expects for the undo pass.
		 */
		ret = query_path(src_dev, dest, 0, 0, 0, 0,
				curr_clk, curr_bw, lnode);

		if (ret) {
			MSM_BUS_ERR("%s: Clear query path failed! %d ctx %d\n",
					__func__, ret, pdata->active_only);
			goto exit_query_usecase;
		}
	}

	/* Clear any nodes still marked dirty by the query passes. */
	list_for_each_entry_safe(node, node_tmp, &query_list, query_link) {
		node->query_dirty = false;
		list_del_init(&node->query_link);
	}

	INIT_LIST_HEAD(&query_list);

exit_query_usecase:
	return ret;
}
+
+static int update_context(uint32_t cl, bool active_only,
+ unsigned int ctx_idx)
+{
+ int ret = 0;
+ struct msm_bus_scale_pdata *pdata;
+ struct msm_bus_client *client;
+
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+ if (!cl) {
+ MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+ ret = -ENXIO;
+ goto exit_update_context;
+ }
+
+ client = handle_list.cl_list[cl];
+ if (!client) {
+ ret = -ENXIO;
+ goto exit_update_context;
+ }
+
+ pdata = client->pdata;
+ if (!pdata) {
+ ret = -ENXIO;
+ goto exit_update_context;
+ }
+ if (pdata->active_only == active_only) {
+ MSM_BUS_ERR("No change in context(%d==%d), skip\n",
+ pdata->active_only, active_only);
+ ret = -ENXIO;
+ goto exit_update_context;
+ }
+
+ if (ctx_idx >= pdata->num_usecases) {
+ MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+ cl, ctx_idx);
+ ret = -ENXIO;
+ goto exit_update_context;
+ }
+
+ pdata->active_only = active_only;
+
+ msm_bus_dbg_client_data(client->pdata, ctx_idx, cl);
+ ret = update_client_paths(client, false, ctx_idx);
+ if (ret) {
+ pr_err("%s: Err updating path\n", __func__);
+ goto exit_update_context;
+ }
+
+// trace_bus_update_request_end(pdata->name);
+
+exit_update_context:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+ return ret;
+}
+
+static int update_request_adhoc(uint32_t cl, unsigned int index)
+{
+ int ret = 0;
+ struct msm_bus_scale_pdata *pdata;
+ struct msm_bus_client *client;
+ const char *test_cl = "Null";
+ bool log_transaction = false;
+
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+
+ if (!cl) {
+ MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+ ret = -ENXIO;
+ goto exit_update_request;
+ }
+
+ client = handle_list.cl_list[cl];
+ if (!client) {
+ MSM_BUS_ERR("%s: Invalid client pointer ", __func__);
+ ret = -ENXIO;
+ goto exit_update_request;
+ }
+
+ pdata = client->pdata;
+ if (!pdata) {
+ MSM_BUS_ERR("%s: Client data Null.[client didn't register]",
+ __func__);
+ ret = -ENXIO;
+ goto exit_update_request;
+ }
+
+ if (index >= pdata->num_usecases) {
+ MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+ cl, index);
+ ret = -ENXIO;
+ goto exit_update_request;
+ }
+
+ if (client->curr == index) {
+ MSM_BUS_DBG("%s: Not updating client request idx %d unchanged",
+ __func__, index);
+ goto exit_update_request;
+ }
+
+ if (!strcmp(test_cl, pdata->name))
+ log_transaction = true;
+
+ MSM_BUS_DBG("%s: cl: %u index: %d curr: %d num_paths: %d\n", __func__,
+ cl, index, client->curr, client->pdata->usecase->num_paths);
+
+ if (pdata->alc)
+ ret = update_client_alc(client, log_transaction, index);
+ else {
+ msm_bus_dbg_client_data(client->pdata, index, cl);
+ ret = update_client_paths(client, log_transaction, index);
+ }
+ if (ret) {
+ pr_err("%s: Err updating path\n", __func__);
+ goto exit_update_request;
+ }
+
+// trace_bus_update_request_end(pdata->name);
+
+exit_update_request:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+ return ret;
+}
+
+static int query_client_usecase(struct msm_bus_tcs_usecase *tcs_usecase,
+ uint32_t cl, unsigned int index)
+{
+ int ret = 0;
+ struct msm_bus_scale_pdata *pdata;
+ struct msm_bus_client *client;
+ const char *test_cl = "Null";
+ bool log_transaction = false;
+
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+
+ if (!cl) {
+ MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+ ret = -ENXIO;
+ goto exit_query_client_usecase;
+ }
+
+ client = handle_list.cl_list[cl];
+ if (!client) {
+ MSM_BUS_ERR("%s: Invalid client pointer ", __func__);
+ ret = -ENXIO;
+ goto exit_query_client_usecase;
+ }
+
+ pdata = client->pdata;
+ if (!pdata) {
+ MSM_BUS_ERR("%s: Client data Null.[client didn't register]",
+ __func__);
+ ret = -ENXIO;
+ goto exit_query_client_usecase;
+ }
+
+ if (index >= pdata->num_usecases) {
+ MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+ cl, index);
+ ret = -ENXIO;
+ goto exit_query_client_usecase;
+ }
+
+ if (!strcmp(test_cl, pdata->name))
+ log_transaction = true;
+
+ MSM_BUS_DBG("%s: cl: %u index: %d curr: %d num_paths: %d\n", __func__,
+ cl, index, client->curr, client->pdata->usecase->num_paths);
+ ret = query_usecase(client, log_transaction, index, tcs_usecase);
+ if (ret) {
+ pr_err("%s: Err updating path\n", __func__);
+ goto exit_query_client_usecase;
+ }
+
+// trace_bus_update_request_end(pdata->name);
+
+exit_query_client_usecase:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+ return ret;
+}
+
+static int query_client_usecase_all(struct msm_bus_tcs_handle *tcs_handle,
+ uint32_t cl)
+{
+ int ret = 0;
+ struct msm_bus_scale_pdata *pdata;
+ struct msm_bus_client *client;
+ const char *test_cl = "Null";
+ bool log_transaction = false;
+ int i = 0;
+
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+
+ if (!cl) {
+ MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+ ret = -ENXIO;
+ goto exit_query_client_usecase_all;
+ }
+
+ client = handle_list.cl_list[cl];
+ if (!client) {
+ MSM_BUS_ERR("%s: Invalid client pointer ", __func__);
+ ret = -ENXIO;
+ goto exit_query_client_usecase_all;
+ }
+
+ pdata = client->pdata;
+ if (!pdata) {
+ MSM_BUS_ERR("%s: Client data Null.[client didn't register]",
+ __func__);
+ ret = -ENXIO;
+ goto exit_query_client_usecase_all;
+ }
+
+ if (!strcmp(test_cl, pdata->name))
+ log_transaction = true;
+
+ MSM_BUS_ERR("%s: query_start", __func__);
+ for (i = 0; i < pdata->num_usecases; i++)
+ query_usecase(client, log_transaction, i,
+ &tcs_handle->usecases[i]);
+ tcs_handle->num_usecases = pdata->num_usecases;
+
+ if (ret) {
+ pr_err("%s: Err updating path\n", __func__);
+ goto exit_query_client_usecase_all;
+ }
+
+// trace_bus_update_request_end(pdata->name);
+
+exit_query_client_usecase_all:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+ return ret;
+}
+
+static void free_cl_mem(struct msm_bus_client_handle *cl)
+{
+ if (cl) {
+ kfree(cl->name);
+ kfree(cl);
+ cl = NULL;
+ }
+}
+
+static int update_bw_adhoc(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+ int ret = 0;
+ char *test_cl = "test-client";
+ bool log_transaction = false;
+ u64 dual_ib, dual_ab, act_ib, act_ab;
+
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+
+ if (!cl) {
+ MSM_BUS_ERR("%s: Invalid client handle %p", __func__, cl);
+ ret = -ENXIO;
+ goto exit_update_request;
+ }
+
+ if (!strcmp(test_cl, cl->name))
+ log_transaction = true;
+
+ msm_bus_dbg_rec_transaction(cl, ab, ib);
+
+ if (cl->active_only) {
+ if ((cl->cur_act_ib == ib) && (cl->cur_act_ab == ab)) {
+ MSM_BUS_DBG("%s:no change in request", cl->name);
+ goto exit_update_request;
+ }
+ act_ib = ib;
+ act_ab = ab;
+ dual_ib = 0;
+ dual_ab = 0;
+ } else {
+ if ((cl->cur_dual_ib == ib) && (cl->cur_dual_ab == ab)) {
+ MSM_BUS_DBG("%s:no change in request", cl->name);
+ goto exit_update_request;
+ }
+ dual_ib = ib;
+ dual_ab = ab;
+ act_ib = 0;
+ act_ab = 0;
+ }
+
+ ret = update_path(cl->mas_dev, cl->slv, act_ib, act_ab, dual_ib,
+ dual_ab, cl->cur_act_ib, cl->cur_act_ab, cl->first_hop,
+ cl->active_only);
+
+ if (ret) {
+ MSM_BUS_ERR("%s: Update path failed! %d active_only %d\n",
+ __func__, ret, cl->active_only);
+ goto exit_update_request;
+ }
+
+ commit_data();
+ cl->cur_act_ib = act_ib;
+ cl->cur_act_ab = act_ab;
+ cl->cur_dual_ib = dual_ib;
+ cl->cur_dual_ab = dual_ab;
+
+ if (log_transaction)
+ getpath_debug(cl->mas, cl->first_hop, cl->active_only);
+// trace_bus_update_request_end(cl->name);
+exit_update_request:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+
+ return ret;
+}
+
+static int update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab,
+ u64 act_ib, u64 dual_ib, u64 dual_ab)
+{
+ int ret = 0;
+
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+ if (!cl) {
+ MSM_BUS_ERR("Invalid client handle %p", cl);
+ ret = -ENXIO;
+ goto exit_change_context;
+ }
+
+ if ((cl->cur_act_ib == act_ib) &&
+ (cl->cur_act_ab == act_ab) &&
+ (cl->cur_dual_ib == dual_ib) &&
+ (cl->cur_dual_ab == dual_ab)) {
+ MSM_BUS_ERR("No change in vote");
+ goto exit_change_context;
+ }
+
+ if (!dual_ab && !dual_ib)
+ cl->active_only = true;
+ msm_bus_dbg_rec_transaction(cl, cl->cur_act_ab, cl->cur_dual_ib);
+ ret = update_path(cl->mas_dev, cl->slv, act_ib, act_ab, dual_ib,
+ dual_ab, cl->cur_act_ab, cl->cur_act_ab,
+ cl->first_hop, cl->active_only);
+ if (ret) {
+ MSM_BUS_ERR("%s: Update path failed! %d active_only %d\n",
+ __func__, ret, cl->active_only);
+ goto exit_change_context;
+ }
+ commit_data();
+ cl->cur_act_ib = act_ib;
+ cl->cur_act_ab = act_ab;
+ cl->cur_dual_ib = dual_ib;
+ cl->cur_dual_ab = dual_ab;
+// trace_bus_update_request_end(cl->name);
+exit_change_context:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+ return ret;
+}
+
+static void unregister_adhoc(struct msm_bus_client_handle *cl)
+{
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+ if (!cl) {
+ MSM_BUS_ERR("%s: Null cl handle passed unregister\n",
+ __func__);
+ goto exit_unregister_client;
+ }
+
+ MSM_BUS_DBG("%s: Unregistering client %p", __func__, cl);
+
+ remove_path(cl->mas_dev, cl->slv, cl->cur_act_ib, cl->cur_act_ab,
+ cl->first_hop, cl->active_only);
+ commit_data();
+ msm_bus_dbg_remove_client(cl);
+ kfree(cl);
+ MSM_BUS_DBG("%s: Unregistered client", __func__);
+exit_unregister_client:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+}
+
+static struct msm_bus_client_handle*
+register_adhoc(uint32_t mas, uint32_t slv, char *name, bool active_only)
+{
+ struct msm_bus_client_handle *client = NULL;
+ int len = 0;
+
+ rt_mutex_lock(&msm_bus_adhoc_lock);
+
+ if (!(mas && slv && name)) {
+ pr_err("%s: Error: src dst name num_paths are required\n",
+ __func__);
+ goto exit_register;
+ }
+
+ client = kzalloc(sizeof(struct msm_bus_client_handle), GFP_KERNEL);
+ if (!client) {
+ MSM_BUS_ERR("%s: Error allocating client data", __func__);
+ goto exit_register;
+ }
+
+ len = strnlen(name, MAX_STR_CL);
+ client->name = kzalloc((len + 1), GFP_KERNEL);
+ if (!client->name) {
+ MSM_BUS_ERR("%s: Error allocating client name buf", __func__);
+ free_cl_mem(client);
+ goto exit_register;
+ }
+ strlcpy(client->name, name, MAX_STR_CL);
+ client->active_only = active_only;
+
+ client->mas = mas;
+ client->slv = slv;
+
+ client->mas_dev = bus_find_device(&msm_bus_type, NULL,
+ (void *) &mas,
+ msm_bus_device_match_adhoc);
+ if (IS_ERR_OR_NULL(client->mas_dev)) {
+ MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+ __func__, client->mas, client->slv);
+ free_cl_mem(client);
+ goto exit_register;
+ }
+
+ client->first_hop = getpath(client->mas_dev, client->slv, client->name);
+ if (client->first_hop < 0) {
+ MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+ __func__, client->mas, client->slv);
+ free_cl_mem(client);
+ goto exit_register;
+ }
+
+ MSM_BUS_DBG("%s:Client handle %p %s", __func__, client,
+ client->name);
+ msm_bus_dbg_add_client(client);
+exit_register:
+ rt_mutex_unlock(&msm_bus_adhoc_lock);
+ return client;
+}
+/**
+ * msm_bus_arb_setops_adhoc() : Setup the bus arbitration ops
+ * @ arb_ops: pointer to the arb ops.
+ */
+void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops)
+{
+ arb_ops->register_client = register_client_adhoc;
+ arb_ops->update_request = update_request_adhoc;
+ arb_ops->unregister_client = unregister_client_adhoc;
+ arb_ops->update_context = update_context;
+
+ arb_ops->register_cl = register_adhoc;
+ arb_ops->unregister = unregister_adhoc;
+ arb_ops->update_bw = update_bw_adhoc;
+ arb_ops->update_bw_context = update_bw_context;
+ arb_ops->query_usecase = query_client_usecase;
+ arb_ops->query_usecase_all = query_client_usecase_all;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc.h b/drivers/soc/qcom/msm_bus/msm_bus_bimc.h
new file mode 100644
index 0000000..15b61c1
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc.h
@@ -0,0 +1,120 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
 */

#ifndef _ARCH_ARM_MACH_MSM_BUS_BIMC_H
#define _ARCH_ARM_MACH_MSM_BUS_BIMC_H

/* Static topology parameters of one BIMC instance. */
struct msm_bus_bimc_params {
	uint32_t bus_id;
	uint32_t addr_width;
	uint32_t data_width;
	uint32_t nmasters;
	uint32_t nslaves;
};

/* Per-context commit data: master and slave hardware node tables. */
struct msm_bus_bimc_commit {
	struct msm_bus_node_hw_info *mas;
	struct msm_bus_node_hw_info *slv;
};

/* Runtime state of a BIMC controller instance. */
struct msm_bus_bimc_info {
	void __iomem *base;		/* mapped register base */
	uint32_t base_addr;		/* physical register base */
	uint32_t qos_freq;		/* QoS clock frequency */
	struct msm_bus_bimc_params params;
	struct msm_bus_bimc_commit cdata[NUM_CTX];
};

/* Per-node connectivity and arbitration attributes. */
struct msm_bus_bimc_node {
	uint32_t conn_mask;
	uint32_t data_width;
	uint8_t slv_arb_mode;
};

/* Slave arbitration schemes supported by BIMC. */
enum msm_bus_bimc_arb_mode {
	BIMC_ARB_MODE_RR = 0,
	BIMC_ARB_MODE_PRIORITY_RR,
	BIMC_ARB_MODE_TIERED_RR,
};


/* Address interleaving options for a slave segment. */
enum msm_bus_bimc_interleave {
	BIMC_INTERLEAVE_NONE = 0,
	BIMC_INTERLEAVE_ODD,
	BIMC_INTERLEAVE_EVEN,
};

/* One address segment decoded to a slave. */
struct msm_bus_bimc_slave_seg {
	bool enable;
	uint64_t start_addr;
	uint64_t seg_size;
	uint8_t interleave;	/* enum msm_bus_bimc_interleave */
};

/* QoS policies selectable per master port. */
enum msm_bus_bimc_qos_mode_type {
	BIMC_QOS_MODE_FIXED = 0,
	BIMC_QOS_MODE_LIMITER,
	BIMC_QOS_MODE_BYPASS,
	BIMC_QOS_MODE_REGULATOR,
};

/* Health-level settings used by limiter/regulator QoS modes. */
struct msm_bus_bimc_qos_health {
	bool limit_commands;
	uint32_t areq_prio;
	uint32_t prio_level;
};

/* Fixed-priority QoS mode settings. */
struct msm_bus_bimc_mode_fixed {
	uint32_t prio_level;
	uint32_t areq_prio_rd;
	uint32_t areq_prio_wr;
};

/* Rate-limited QoS mode: up to four health levels. */
struct msm_bus_bimc_mode_rl {
	uint8_t qhealthnum;
	struct msm_bus_bimc_qos_health qhealth[4];
};

/* QoS mode selector plus parameters for the chosen mode. */
struct msm_bus_bimc_qos_mode {
	uint8_t mode;		/* enum msm_bus_bimc_qos_mode_type */
	struct msm_bus_bimc_mode_fixed fixed;
	struct msm_bus_bimc_mode_rl rl;
};

/* Bandwidth-limiter configuration for a master port. */
struct msm_bus_bimc_qos_bw {
	uint64_t bw; /* bw is in Bytes/sec */
	uint32_t ws; /* Window size in nano seconds*/
	int64_t thh; /* Threshold high, bytes per second */
	int64_t thm; /* Threshold medium, bytes per second */
	int64_t thl; /* Threshold low, bytes per second */
	u32 gp; /* Grant Period in micro seconds */
	u32 thmp; /* Threshold medium in percentage */
};

/* Clock-gating enables for a BIMC node. */
struct msm_bus_bimc_clk_gate {
	bool core_clk_gate_en;
	bool arb_clk_gate_en; /* For arbiter */
	bool port_clk_gate_en; /* For regs on BIMC core clock */
};

/* Program an address segment on a slave port. */
void msm_bus_bimc_set_slave_seg(struct msm_bus_bimc_info *binfo,
	uint32_t slv_index, uint32_t seg_index,
	struct msm_bus_bimc_slave_seg *bsseg);
/* Configure clock gating on a slave port. */
void msm_bus_bimc_set_slave_clk_gate(struct msm_bus_bimc_info *binfo,
	uint32_t slv_index, struct msm_bus_bimc_clk_gate *bgate);
/* Configure clock gating on a master port. */
void msm_bus_bimc_set_mas_clk_gate(struct msm_bus_bimc_info *binfo,
	uint32_t mas_index, struct msm_bus_bimc_clk_gate *bgate);
/* Enable or disable arbitration on a slave port. */
void msm_bus_bimc_arb_en(struct msm_bus_bimc_info *binfo,
	uint32_t slv_index, bool en);
/* Read back the controller's static parameters. */
void msm_bus_bimc_get_params(struct msm_bus_bimc_info *binfo,
	struct msm_bus_bimc_params *params);
/* Read back a master port's node attributes. */
void msm_bus_bimc_get_mas_params(struct msm_bus_bimc_info *binfo,
	uint32_t mas_index, struct msm_bus_bimc_node *mparams);
/* Read back a slave port's node attributes. */
void msm_bus_bimc_get_slv_params(struct msm_bus_bimc_info *binfo,
	uint32_t slv_index, struct msm_bus_bimc_node *sparams);
/* Query whether arbitration is enabled on a slave port. */
bool msm_bus_bimc_get_arb_en(struct msm_bus_bimc_info *binfo,
	uint32_t slv_index);

#endif /*_ARCH_ARM_MACH_MSM_BUS_BIMC_H*/
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c
new file mode 100644
index 0000000..f180781
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: BIMC: %s(): " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_bimc.h"
+#include "msm_bus_adhoc.h"
+#include <trace/events/trace_msm_bus.h>
+
+/* M_Generic */
+
+enum bke_sw {
+ BKE_OFF = 0,
+ BKE_ON = 1,
+};
+
+#define M_REG_BASE(b) ((b) + 0x00008000)
+
+#define M_MODE_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000210)
+enum bimc_m_mode {
+ M_MODE_RMSK = 0xf0000011,
+ M_MODE_WR_GATHER_BEATS_BMSK = 0xf0000000,
+ M_MODE_WR_GATHER_BEATS_SHFT = 0x1c,
+ M_MODE_NARROW_WR_BMSK = 0x10,
+ M_MODE_NARROW_WR_SHFT = 0x4,
+ M_MODE_ORDERING_MODEL_BMSK = 0x1,
+ M_MODE_ORDERING_MODEL_SHFT = 0x0,
+};
+
+#define M_PRIOLVL_OVERRIDE_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
+enum bimc_m_priolvl_override {
+ M_PRIOLVL_OVERRIDE_RMSK = 0x301,
+ M_PRIOLVL_OVERRIDE_BMSK = 0x300,
+ M_PRIOLVL_OVERRIDE_SHFT = 0x8,
+ M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK = 0x1,
+ M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT = 0x0,
+};
+
+#define M_RD_CMD_OVERRIDE_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
+enum bimc_m_read_command_override {
+ M_RD_CMD_OVERRIDE_RMSK = 0x3071f7f,
+ M_RD_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000,
+ M_RD_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18,
+ M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000,
+ M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10,
+ M_RD_CMD_OVERRIDE_ATRANSIENT_BMSK = 0x1000,
+ M_RD_CMD_OVERRIDE_ATRANSIENT_SHFT = 0xc,
+ M_RD_CMD_OVERRIDE_ASHARED_BMSK = 0x800,
+ M_RD_CMD_OVERRIDE_ASHARED_SHFT = 0xb,
+ M_RD_CMD_OVERRIDE_AREDIRECT_BMSK = 0x400,
+ M_RD_CMD_OVERRIDE_AREDIRECT_SHFT = 0xa,
+ M_RD_CMD_OVERRIDE_AOOO_BMSK = 0x200,
+ M_RD_CMD_OVERRIDE_AOOO_SHFT = 0x9,
+ M_RD_CMD_OVERRIDE_AINNERSHARED_BMSK = 0x100,
+ M_RD_CMD_OVERRIDE_AINNERSHARED_SHFT = 0x8,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x40,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x6,
+ M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK = 0x20,
+ M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT = 0x5,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK = 0x10,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT = 0x4,
+ M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8,
+ M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT = 0x3,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK = 0x4,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT = 0x2,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK = 0x2,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT = 0x1,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK = 0x1,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT = 0x0,
+};
+
+#define M_WR_CMD_OVERRIDE_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250)
+enum bimc_m_write_command_override {
+ M_WR_CMD_OVERRIDE_RMSK = 0x3071f7f,
+ M_WR_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000,
+ M_WR_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18,
+ M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000,
+ M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10,
+ M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK = 0x1000,
+ M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT = 0xc,
+ M_WR_CMD_OVERRIDE_ASHARED_BMSK = 0x800,
+ M_WR_CMD_OVERRIDE_ASHARED_SHFT = 0xb,
+ M_WR_CMD_OVERRIDE_AREDIRECT_BMSK = 0x400,
+ M_WR_CMD_OVERRIDE_AREDIRECT_SHFT = 0xa,
+ M_WR_CMD_OVERRIDE_AOOO_BMSK = 0x200,
+ M_WR_CMD_OVERRIDE_AOOO_SHFT = 0x9,
+ M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK = 0x100,
+ M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT = 0x8,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x40,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x6,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK = 0x20,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT = 0x5,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK = 0x10,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT = 0x4,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT = 0x3,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK = 0x4,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT = 0x2,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK = 0x2,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT = 0x1,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK = 0x1,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT = 0x0,
+};
+
+#define M_BKE_EN_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000300)
+enum bimc_m_bke_en {
+ M_BKE_EN_RMSK = 0x1,
+ M_BKE_EN_EN_BMSK = 0x1,
+ M_BKE_EN_EN_SHFT = 0x0,
+};
+
+/* Grant Period registers */
+#define M_BKE_GP_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000304)
+enum bimc_m_bke_grant_period {
+ M_BKE_GP_RMSK = 0x3ff,
+ M_BKE_GP_GP_BMSK = 0x3ff,
+ M_BKE_GP_GP_SHFT = 0x0,
+};
+
+/* Grant count register.
+ * The Grant count register represents a signed 16 bit
+ * value, range 0-0x7fff
+ */
+#define M_BKE_GC_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000308)
+enum bimc_m_bke_grant_count {
+ M_BKE_GC_RMSK = 0xffff,
+ M_BKE_GC_GC_BMSK = 0xffff,
+ M_BKE_GC_GC_SHFT = 0x0,
+};
+
+/* Threshold High Registers */
+#define M_BKE_THH_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000320)
+enum bimc_m_bke_thresh_high {
+ M_BKE_THH_RMSK = 0xffff,
+ M_BKE_THH_THRESH_BMSK = 0xffff,
+ M_BKE_THH_THRESH_SHFT = 0x0,
+};
+
+/* Threshold Medium Registers */
+#define M_BKE_THM_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000324)
+enum bimc_m_bke_thresh_medium {
+ M_BKE_THM_RMSK = 0xffff,
+ M_BKE_THM_THRESH_BMSK = 0xffff,
+ M_BKE_THM_THRESH_SHFT = 0x0,
+};
+
+/* Threshold Low Registers */
+#define M_BKE_THL_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000328)
+enum bimc_m_bke_thresh_low {
+ M_BKE_THL_RMSK = 0xffff,
+ M_BKE_THL_THRESH_BMSK = 0xffff,
+ M_BKE_THL_THRESH_SHFT = 0x0,
+};
+
+#define NUM_HEALTH_LEVEL (4)
+#define M_BKE_HEALTH_0_CONFIG_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000340)
+enum bimc_m_bke_health_0 {
+ M_BKE_HEALTH_0_CONFIG_RMSK = 0x80000303,
+ M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK = 0x80000000,
+ M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT = 0x1f,
+ M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK = 0x300,
+ M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT = 0x8,
+ M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK = 0x3,
+ M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT = 0x0,
+};
+
+#define M_BKE_HEALTH_1_CONFIG_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000344)
+enum bimc_m_bke_health_1 {
+ M_BKE_HEALTH_1_CONFIG_RMSK = 0x80000303,
+ M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_BMSK = 0x80000000,
+ M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_SHFT = 0x1f,
+ M_BKE_HEALTH_1_CONFIG_AREQPRIO_BMSK = 0x300,
+ M_BKE_HEALTH_1_CONFIG_AREQPRIO_SHFT = 0x8,
+ M_BKE_HEALTH_1_CONFIG_PRIOLVL_BMSK = 0x3,
+ M_BKE_HEALTH_1_CONFIG_PRIOLVL_SHFT = 0x0,
+};
+
+#define M_BKE_HEALTH_2_CONFIG_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000348)
+enum bimc_m_bke_health_2 {
+ M_BKE_HEALTH_2_CONFIG_RMSK = 0x80000303,
+ M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_BMSK = 0x80000000,
+ M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_SHFT = 0x1f,
+ M_BKE_HEALTH_2_CONFIG_AREQPRIO_BMSK = 0x300,
+ M_BKE_HEALTH_2_CONFIG_AREQPRIO_SHFT = 0x8,
+ M_BKE_HEALTH_2_CONFIG_PRIOLVL_BMSK = 0x3,
+ M_BKE_HEALTH_2_CONFIG_PRIOLVL_SHFT = 0x0,
+};
+
+#define M_BKE_HEALTH_3_CONFIG_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x0000034c)
+enum bimc_m_bke_health_3 {
+ M_BKE_HEALTH_3_CONFIG_RMSK = 0x303,
+ M_BKE_HEALTH_3_CONFIG_AREQPRIO_BMSK = 0x300,
+ M_BKE_HEALTH_3_CONFIG_AREQPRIO_SHFT = 0x8,
+ M_BKE_HEALTH_3_CONFIG_PRIOLVL_BMSK = 0x3,
+ M_BKE_HEALTH_3_CONFIG_PRIOLVL_SHFT = 0x0,
+};
+
+#define BKE_HEALTH_MASK \
+ (M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK |\
+ M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK |\
+ M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK)
+
+#define BKE_HEALTH_VAL(limit, areq, plvl) \
+ ((((limit) << M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT) & \
+ M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK) | \
+ (((areq) << M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT) & \
+ M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK) | \
+ (((plvl) << M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT) & \
+ M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK))
+
+#define MAX_GRANT_PERIOD \
+ (M_BKE_GP_GP_BMSK >> \
+ M_BKE_GP_GP_SHFT)
+
+#define MAX_GC \
+ (M_BKE_GC_GC_BMSK >> \
+ (M_BKE_GC_GC_SHFT + 1))
+
/*
 * In-place divide *a by b via the kernel's do_div(), returning the
 * remainder.  Special case: a non-zero dividend smaller than the
 * divisor is rounded down to zero and 1 (a truncation flag, not the
 * true remainder) is returned.
 */
static int bimc_div(uint64_t *a, uint32_t b)
{
	if ((*a > 0) && (*a < b)) {
		*a = 0;
		return 1;
	}

	return do_div(*a, b);
}
+
/*
 * Turn the Bandwidth Keeper (BKE) on or off for master port @index.
 * Reads the current enable register and skips the write when the
 * enable bit already matches @req.
 */
static void set_bke_en(void __iomem *addr, uint32_t index,
	bool req)
{
	uint32_t old_val, new_val;

	old_val = readl_relaxed(M_BKE_EN_ADDR(addr, index));
	new_val = req << M_BKE_EN_EN_SHFT;
	if ((old_val & M_BKE_EN_RMSK) == (new_val))
		return;	/* already in the requested state */
	writel_relaxed(((old_val & ~(M_BKE_EN_EN_BMSK)) | (new_val &
		M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(addr, index));
	/* Ensure that BKE register is programmed set before returning */
	wmb();
}
+
/*
 * Program one M_BKE_HEALTH_n register (address already offset for the
 * master in @addr) from health level @index of @qmode->rl.qhealth[]:
 * limit_commands, areq_prio and prio_level.  Register bits outside
 * BKE_HEALTH_MASK are preserved after masking with @rmsk.
 */
static void set_health_reg(void __iomem *addr, uint32_t rmsk,
	uint8_t index, struct msm_bus_bimc_qos_mode *qmode)
{
	uint32_t reg_val, val0, val;

	/* Note, addr is already passed with right mas_index */
	reg_val = readl_relaxed(addr) & rmsk;
	val0 = BKE_HEALTH_VAL(qmode->rl.qhealth[index].limit_commands,
		qmode->rl.qhealth[index].areq_prio,
		qmode->rl.qhealth[index].prio_level);
	val = ((reg_val & (~(BKE_HEALTH_MASK))) | (val0 & BKE_HEALTH_MASK));
	writel_relaxed(val, addr);
	/*
	 * Ensure that priority for regulator/limiter modes are
	 * set before returning
	 */
	wmb();
}
+
/*
 * Apply the QoS priority configuration for master @mas_index according
 * to @qmode_sel.  Fixed/regulator/limiter modes program all four BKE
 * health registers (level 3 down to 0) and then enable the BKE; bypass
 * disables the BKE; any other mode is ignored.
 */
static void msm_bus_bimc_set_qos_prio(void __iomem *base,
	uint32_t mas_index, uint8_t qmode_sel,
	struct msm_bus_bimc_qos_mode *qmode)
{
	switch (qmode_sel) {
	case BIMC_QOS_MODE_FIXED:
	case BIMC_QOS_MODE_REGULATOR:
	case BIMC_QOS_MODE_LIMITER:
		set_health_reg(M_BKE_HEALTH_3_CONFIG_ADDR(base,
			mas_index), M_BKE_HEALTH_3_CONFIG_RMSK, 3, qmode);
		set_health_reg(M_BKE_HEALTH_2_CONFIG_ADDR(base,
			mas_index), M_BKE_HEALTH_2_CONFIG_RMSK, 2, qmode);
		set_health_reg(M_BKE_HEALTH_1_CONFIG_ADDR(base,
			mas_index), M_BKE_HEALTH_1_CONFIG_RMSK, 1, qmode);
		set_health_reg(M_BKE_HEALTH_0_CONFIG_ADDR(base,
			mas_index), M_BKE_HEALTH_0_CONFIG_RMSK, 0, qmode);
		set_bke_en(base, mas_index, true);
		break;
	case BIMC_QOS_MODE_BYPASS:
		set_bke_en(base, mas_index, false);
		break;
	default:
		break;
	}
}
+
/*
 * Write the BKE bandwidth registers for master port @mas_index:
 * grant period @gp, grant count @gc, and the high/medium/low
 * thresholds @th/@tm/@tl (medium and low are expected to be negative;
 * see bimc_set_static_qos_bw()).
 *
 * NOTE(review): the BKE is disabled before the update, as the spec
 * requires, but is NOT re-enabled on return — callers must re-enable
 * it via set_bke_en()/msm_bus_bimc_set_qos_prio().  Confirm that every
 * call site does so after this runs.
 */
static void set_qos_bw_regs(void __iomem *baddr, uint32_t mas_index,
	int32_t th, int32_t tm, int32_t tl, uint32_t gp,
	uint32_t gc)
{
	int32_t reg_val, val;
	int32_t bke_reg_val;
	/* thresholds occupy 16-bit fields; int16_t makes the sign truncation explicit */
	int16_t val2;

	/* Disable BKE before writing to registers as per spec */
	bke_reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index));
	writel_relaxed((bke_reg_val & ~(M_BKE_EN_EN_BMSK)),
		M_BKE_EN_ADDR(baddr, mas_index));

	/* Write values of registers calculated */
	/* Grant period */
	reg_val = readl_relaxed(M_BKE_GP_ADDR(baddr, mas_index))
		& M_BKE_GP_RMSK;
	val = gp << M_BKE_GP_GP_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_GP_GP_BMSK)) | (val &
		M_BKE_GP_GP_BMSK)), M_BKE_GP_ADDR(baddr, mas_index));

	/* Grant count */
	reg_val = readl_relaxed(M_BKE_GC_ADDR(baddr, mas_index)) &
		M_BKE_GC_RMSK;
	val = gc << M_BKE_GC_GC_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_GC_GC_BMSK)) | (val &
		M_BKE_GC_GC_BMSK)), M_BKE_GC_ADDR(baddr, mas_index));

	/* Threshold high */
	reg_val = readl_relaxed(M_BKE_THH_ADDR(baddr, mas_index)) &
		M_BKE_THH_RMSK;
	val = th << M_BKE_THH_THRESH_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_THH_THRESH_BMSK)) | (val &
		M_BKE_THH_THRESH_BMSK)), M_BKE_THH_ADDR(baddr, mas_index));

	/* Threshold medium */
	reg_val = readl_relaxed(M_BKE_THM_ADDR(baddr, mas_index)) &
		M_BKE_THM_RMSK;
	val2 = tm << M_BKE_THM_THRESH_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_THM_THRESH_BMSK)) | (val2 &
		M_BKE_THM_THRESH_BMSK)), M_BKE_THM_ADDR(baddr, mas_index));

	/* Threshold low */
	reg_val = readl_relaxed(M_BKE_THL_ADDR(baddr, mas_index)) &
		M_BKE_THL_RMSK;
	val2 = tl << M_BKE_THL_THRESH_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_THL_THRESH_BMSK)) |
		(val2 & M_BKE_THL_THRESH_BMSK)), M_BKE_THL_ADDR(baddr,
		mas_index));

	/* Ensure that all bandwidth register writes have completed
	 * before returning
	 */
	wmb();
}
+
+static void bimc_set_static_qos_bw(void __iomem *base, unsigned int qos_freq,
+ int mport, struct msm_bus_bimc_qos_bw *qbw)
+{
+ int32_t bw_mbps, thh = 0, thm, thl, gc;
+ int32_t gp;
+ u64 temp;
+
+ if (qos_freq == 0) {
+ MSM_BUS_DBG("No QoS Frequency.\n");
+ return;
+ }
+
+ if (!(qbw->bw && qbw->gp)) {
+ MSM_BUS_DBG("No QoS Bandwidth or Window size\n");
+ return;
+ }
+
+ /* Convert bandwidth to MBPS */
+ temp = qbw->bw;
+ bimc_div(&temp, 1000000);
+ bw_mbps = temp;
+
+ /* Grant period in clock cycles
+ * Grant period from bandwidth structure
+ * is in nano seconds, QoS freq is in KHz.
+ * Divide by 1000 to get clock cycles.
+ */
+ gp = (qos_freq * qbw->gp) / (1000 * NSEC_PER_USEC);
+
+ /* Grant count = BW in MBps * Grant period
+ * in micro seconds
+ */
+ gc = bw_mbps * (qbw->gp / NSEC_PER_USEC);
+ gc = min(gc, MAX_GC);
+
+ /* Medium threshold = -((Medium Threshold percentage *
+ * Grant count) / 100)
+ */
+ thm = -((qbw->thmp * gc) / 100);
+ qbw->thm = thm;
+
+ /* Low threshold = -(Grant count) */
+ thl = -gc;
+ qbw->thl = thl;
+
+ MSM_BUS_DBG("%s: BKE parameters: gp %d, gc %d, thm %d thl %d thh %d",
+ __func__, gp, gc, thm, thl, thh);
+
+ trace_bus_bke_params(gc, gp, thl, thm, thl);
+ set_qos_bw_regs(base, mport, thh, thm, thl, gp, gc);
+}
+
/*
 * Throttle or un-throttle the QoS ports of master node @info.
 *
 * When @enable_lim is THROTTLE_ON and @lim_bw is non-zero, the node is
 * switched to limiter mode: health level 0 limits commands, all levels
 * inherit the node's configured priority, and the static BKE bandwidth
 * is reprogrammed to @lim_bw — but only when it differs from the last
 * programmed value cached in node_info->lim_bw.  Otherwise the node's
 * own configured QoS mode is restored (with priorities rebuilt unless
 * it is bypass).  In both cases the resulting priority mode is applied
 * to every QoS port at the end.
 *
 * Returns 0 always; a node without QoS ports is a no-op.
 */
static int msm_bus_bimc_limit_mport(struct msm_bus_node_device_type *info,
	void __iomem *qos_base, uint32_t qos_off,
	uint32_t qos_delta, uint32_t qos_freq,
	int enable_lim, u64 lim_bw)
{
	int mode;
	int i;
	struct msm_bus_bimc_qos_mode qmode = {0};

	if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
		MSM_BUS_DBG("No QoS Ports to limit\n");
		return 0;
	}

	if ((enable_lim == THROTTLE_ON) && lim_bw) {
		mode = BIMC_QOS_MODE_LIMITER;

		/* Only health level 0 limits commands when throttling */
		qmode.rl.qhealth[0].limit_commands = true;
		qmode.rl.qhealth[1].limit_commands = false;
		qmode.rl.qhealth[2].limit_commands = false;
		qmode.rl.qhealth[3].limit_commands = false;
		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
			qmode.rl.qhealth[i].prio_level =
				info->node_info->qos_params.prio_lvl;
			qmode.rl.qhealth[i].areq_prio =
				info->node_info->qos_params.prio_rd;
		}

		for (i = 0; i < info->node_info->num_qports; i++) {
			struct msm_bus_bimc_qos_bw qbw;
			/* If not in fixed mode, update bandwidth */
			if (info->node_info->lim_bw != lim_bw) {
				qbw.ws = info->node_info->qos_params.ws;
				qbw.bw = lim_bw;
				qbw.gp = info->node_info->qos_params.gp;
				qbw.thmp = info->node_info->qos_params.thmp;
				bimc_set_static_qos_bw(qos_base, qos_freq,
					info->node_info->qport[i], &qbw);
			}
		}
		info->node_info->lim_bw = lim_bw;
	} else {
		/* Restore the node's configured mode */
		mode = info->node_info->qos_params.mode;
		if (mode != BIMC_QOS_MODE_BYPASS) {
			for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
				qmode.rl.qhealth[i].prio_level =
					info->node_info->qos_params.prio_lvl;
				qmode.rl.qhealth[i].areq_prio =
					info->node_info->qos_params.prio_rd;
			}
		}
	}

	for (i = 0; i < info->node_info->num_qports; i++)
		msm_bus_bimc_set_qos_prio(qos_base, info->node_info->qport[i],
			mode, &qmode);
	return 0;
}
+
+static bool msm_bus_bimc_update_bw_reg(int mode)
+{
+ bool ret = false;
+
+ if ((mode == BIMC_QOS_MODE_LIMITER)
+ || (mode == BIMC_QOS_MODE_REGULATOR))
+ ret = true;
+
+ return ret;
+}
+
/*
 * One-time QoS setup for master node @info: build the health-level
 * priority configuration that matches the node's configured mode and
 * apply it to each of its QoS ports.  Bandwidth (BKE grant/threshold)
 * registers are not touched here — see msm_bus_bimc_set_bw() /
 * bimc_set_static_qos_bw().
 *
 * Returns 0 always; nodes without QoS ports are skipped.
 */
static int msm_bus_bimc_qos_init(struct msm_bus_node_device_type *info,
	void __iomem *qos_base,
	uint32_t qos_off, uint32_t qos_delta,
	uint32_t qos_freq)
{
	int i;
	struct msm_bus_bimc_qos_mode qmode = {0};

	if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
		MSM_BUS_DBG("No QoS Ports to init\n");
		return 0;
	}

	switch (info->node_info->qos_params.mode) {
	/* For now Fixed and regulator are handled the same way. */
	case BIMC_QOS_MODE_FIXED:
	case BIMC_QOS_MODE_REGULATOR:
		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
			qmode.rl.qhealth[i].prio_level =
				info->node_info->qos_params.prio_lvl;
			qmode.rl.qhealth[i].areq_prio =
				info->node_info->qos_params.prio_rd;
		}
		break;
	case BIMC_QOS_MODE_LIMITER:
		/* Only health level 0 limits commands in limiter mode */
		qmode.rl.qhealth[0].limit_commands = true;
		qmode.rl.qhealth[1].limit_commands = false;
		qmode.rl.qhealth[2].limit_commands = false;
		qmode.rl.qhealth[3].limit_commands = false;
		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
			qmode.rl.qhealth[i].prio_level =
				info->node_info->qos_params.prio_lvl;
			qmode.rl.qhealth[i].areq_prio =
				info->node_info->qos_params.prio_rd;
		}
		break;
	default:
		break;
	}

	for (i = 0; i < info->node_info->num_qports; i++)
		msm_bus_bimc_set_qos_prio(qos_base, info->node_info->qport[i],
			info->node_info->qos_params.mode, &qmode);

	return 0;
}
+
/*
 * Reprogram the limiter bandwidth of @dev from its current ACTIVE_CTX
 * bandwidth vote.  Only nodes configured in limiter mode are handled:
 * the vote is split evenly across the node's QoS ports, padded by
 * qos_params.bw_buffer, and programmed with defaults of gp = 5000
 * (5us, gp is in ns) and thmp = 50% when unset.  A zero vote switches
 * the ports to fixed mode and skips the BKE bandwidth programming.
 *
 * NOTE(review): qbw.ws is never initialized here (unlike in
 * msm_bus_bimc_limit_mport()); bimc_set_static_qos_bw() does not read
 * it, but confirm that is intended.
 *
 * Returns 0 in all cases.
 */
static int msm_bus_bimc_set_bw(struct msm_bus_node_device_type *dev,
	void __iomem *qos_base, uint32_t qos_off,
	uint32_t qos_delta, uint32_t qos_freq)
{
	struct msm_bus_bimc_qos_bw qbw;
	struct msm_bus_bimc_qos_mode qmode = {0};
	int i;
	int64_t bw = 0;
	int ret = 0;
	struct msm_bus_node_info_type *info = dev->node_info;
	int mode;

	if (info && info->num_qports &&
		((info->qos_params.mode == BIMC_QOS_MODE_LIMITER))) {
		/* Per-port share of the aggregate bandwidth vote */
		bw = msm_bus_div64(dev->node_bw[ACTIVE_CTX].sum_ab,
			info->num_qports);

		MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n",
			info->id, bw);

		if (!info->qport) {
			MSM_BUS_DBG("No qos ports to update!\n");
			goto exit_set_bw;
		}

		qbw.bw = bw + info->qos_params.bw_buffer;
		trace_bus_bimc_config_limiter(info->id, bw);

		/* Default to gp of 5us */
		qbw.gp = (info->qos_params.gp ?
			info->qos_params.gp : 5000);
		/* Default to thmp of 50% */
		qbw.thmp = (info->qos_params.thmp ?
			info->qos_params.thmp : 50);
		/*
		 * If the BW vote is 0 then set the QoS mode to
		 * Fixed/0/0.
		 */
		if (bw) {
			qmode.rl.qhealth[0].limit_commands = true;
			qmode.rl.qhealth[1].limit_commands = false;
			qmode.rl.qhealth[2].limit_commands = false;
			qmode.rl.qhealth[3].limit_commands = false;
			mode = info->qos_params.mode;
		} else {
			mode = BIMC_QOS_MODE_FIXED;
		}

		for (i = 0; i < info->num_qports; i++) {
			msm_bus_bimc_set_qos_prio(qos_base,
				info->qport[i], mode, &qmode);
			if (bw)
				bimc_set_static_qos_bw(qos_base, qos_freq,
					info->qport[i], &qbw);
		}
	}
exit_set_bw:
	return ret;
}
+
+int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+ if (!bus_dev)
+ return -ENODEV;
+ bus_dev->fabdev->noc_ops.qos_init = msm_bus_bimc_qos_init;
+ bus_dev->fabdev->noc_ops.set_bw = msm_bus_bimc_set_bw;
+ bus_dev->fabdev->noc_ops.limit_mport = msm_bus_bimc_limit_mport;
+ bus_dev->fabdev->noc_ops.update_bw_reg =
+ msm_bus_bimc_update_bw_reg;
+ return 0;
+}
+EXPORT_SYMBOL(msm_bus_bimc_set_ops);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_bimc_rpmh.c
new file mode 100644
index 0000000..1c4546a
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc_rpmh.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: BIMC: %s(): " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_bimc.h"
+#include "msm_bus_adhoc.h"
+
+/* M_Generic */
+
+enum bke_sw {
+ BKE_OFF = 0,
+ BKE_ON = 1,
+};
+
+#define M_REG_BASE(b) ((b) + 0x00008000)
+
+#define M_MODE_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000210)
+enum bimc_m_mode {
+ M_MODE_RMSK = 0xf0000011,
+ M_MODE_WR_GATHER_BEATS_BMSK = 0xf0000000,
+ M_MODE_WR_GATHER_BEATS_SHFT = 0x1c,
+ M_MODE_NARROW_WR_BMSK = 0x10,
+ M_MODE_NARROW_WR_SHFT = 0x4,
+ M_MODE_ORDERING_MODEL_BMSK = 0x1,
+ M_MODE_ORDERING_MODEL_SHFT = 0x0,
+};
+
+#define M_PRIOLVL_OVERRIDE_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
+enum bimc_m_priolvl_override {
+ M_PRIOLVL_OVERRIDE_RMSK = 0x301,
+ M_PRIOLVL_OVERRIDE_BMSK = 0x300,
+ M_PRIOLVL_OVERRIDE_SHFT = 0x8,
+ M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK = 0x1,
+ M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT = 0x0,
+};
+
+#define M_RD_CMD_OVERRIDE_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
+enum bimc_m_read_command_override {
+ M_RD_CMD_OVERRIDE_RMSK = 0x3071f7f,
+ M_RD_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000,
+ M_RD_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18,
+ M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000,
+ M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10,
+ M_RD_CMD_OVERRIDE_ATRANSIENT_BMSK = 0x1000,
+ M_RD_CMD_OVERRIDE_ATRANSIENT_SHFT = 0xc,
+ M_RD_CMD_OVERRIDE_ASHARED_BMSK = 0x800,
+ M_RD_CMD_OVERRIDE_ASHARED_SHFT = 0xb,
+ M_RD_CMD_OVERRIDE_AREDIRECT_BMSK = 0x400,
+ M_RD_CMD_OVERRIDE_AREDIRECT_SHFT = 0xa,
+ M_RD_CMD_OVERRIDE_AOOO_BMSK = 0x200,
+ M_RD_CMD_OVERRIDE_AOOO_SHFT = 0x9,
+ M_RD_CMD_OVERRIDE_AINNERSHARED_BMSK = 0x100,
+ M_RD_CMD_OVERRIDE_AINNERSHARED_SHFT = 0x8,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x40,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x6,
+ M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK = 0x20,
+ M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT = 0x5,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK = 0x10,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT = 0x4,
+ M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8,
+ M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT = 0x3,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK = 0x4,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT = 0x2,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK = 0x2,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT = 0x1,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK = 0x1,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT = 0x0,
+};
+
+#define M_WR_CMD_OVERRIDE_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250)
+enum bimc_m_write_command_override {
+ M_WR_CMD_OVERRIDE_RMSK = 0x3071f7f,
+ M_WR_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000,
+ M_WR_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18,
+ M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000,
+ M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10,
+ M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK = 0x1000,
+ M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT = 0xc,
+ M_WR_CMD_OVERRIDE_ASHARED_BMSK = 0x800,
+ M_WR_CMD_OVERRIDE_ASHARED_SHFT = 0xb,
+ M_WR_CMD_OVERRIDE_AREDIRECT_BMSK = 0x400,
+ M_WR_CMD_OVERRIDE_AREDIRECT_SHFT = 0xa,
+ M_WR_CMD_OVERRIDE_AOOO_BMSK = 0x200,
+ M_WR_CMD_OVERRIDE_AOOO_SHFT = 0x9,
+ M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK = 0x100,
+ M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT = 0x8,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x40,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x6,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK = 0x20,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT = 0x5,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK = 0x10,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT = 0x4,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT = 0x3,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK = 0x4,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT = 0x2,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK = 0x2,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT = 0x1,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK = 0x1,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT = 0x0,
+};
+
+#define M_BKE_EN_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000300)
+enum bimc_m_bke_en {
+ M_BKE_EN_RMSK = 0x1,
+ M_BKE_EN_EN_BMSK = 0x1,
+ M_BKE_EN_EN_SHFT = 0x0,
+};
+
+/* Grant Period registers */
+#define M_BKE_GP_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000304)
+enum bimc_m_bke_grant_period {
+ M_BKE_GP_RMSK = 0x3ff,
+ M_BKE_GP_GP_BMSK = 0x3ff,
+ M_BKE_GP_GP_SHFT = 0x0,
+};
+
+/* Grant count register.
+ * The Grant count register represents a signed 16 bit
+ * value, range 0-0x7fff
+ */
+#define M_BKE_GC_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000308)
+enum bimc_m_bke_grant_count {
+ M_BKE_GC_RMSK = 0xffff,
+ M_BKE_GC_GC_BMSK = 0xffff,
+ M_BKE_GC_GC_SHFT = 0x0,
+};
+
+/* Threshold High Registers */
+#define M_BKE_THH_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000320)
+enum bimc_m_bke_thresh_high {
+ M_BKE_THH_RMSK = 0xffff,
+ M_BKE_THH_THRESH_BMSK = 0xffff,
+ M_BKE_THH_THRESH_SHFT = 0x0,
+};
+
+/* Threshold Medium Registers */
+#define M_BKE_THM_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000324)
+enum bimc_m_bke_thresh_medium {
+ M_BKE_THM_RMSK = 0xffff,
+ M_BKE_THM_THRESH_BMSK = 0xffff,
+ M_BKE_THM_THRESH_SHFT = 0x0,
+};
+
+/* Threshold Low Registers */
+#define M_BKE_THL_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000328)
+enum bimc_m_bke_thresh_low {
+ M_BKE_THL_RMSK = 0xffff,
+ M_BKE_THL_THRESH_BMSK = 0xffff,
+ M_BKE_THL_THRESH_SHFT = 0x0,
+};
+
+#define NUM_HEALTH_LEVEL (4)
+#define M_BKE_HEALTH_0_CONFIG_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000340)
+enum bimc_m_bke_health_0 {
+ M_BKE_HEALTH_0_CONFIG_RMSK = 0x80000303,
+ M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK = 0x80000000,
+ M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT = 0x1f,
+ M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK = 0x300,
+ M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT = 0x8,
+ M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK = 0x3,
+ M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT = 0x0,
+};
+
+#define M_BKE_HEALTH_1_CONFIG_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000344)
+enum bimc_m_bke_health_1 {
+ M_BKE_HEALTH_1_CONFIG_RMSK = 0x80000303,
+ M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_BMSK = 0x80000000,
+ M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_SHFT = 0x1f,
+ M_BKE_HEALTH_1_CONFIG_AREQPRIO_BMSK = 0x300,
+ M_BKE_HEALTH_1_CONFIG_AREQPRIO_SHFT = 0x8,
+ M_BKE_HEALTH_1_CONFIG_PRIOLVL_BMSK = 0x3,
+ M_BKE_HEALTH_1_CONFIG_PRIOLVL_SHFT = 0x0,
+};
+
+#define M_BKE_HEALTH_2_CONFIG_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000348)
+enum bimc_m_bke_health_2 {
+ M_BKE_HEALTH_2_CONFIG_RMSK = 0x80000303,
+ M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_BMSK = 0x80000000,
+ M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_SHFT = 0x1f,
+ M_BKE_HEALTH_2_CONFIG_AREQPRIO_BMSK = 0x300,
+ M_BKE_HEALTH_2_CONFIG_AREQPRIO_SHFT = 0x8,
+ M_BKE_HEALTH_2_CONFIG_PRIOLVL_BMSK = 0x3,
+ M_BKE_HEALTH_2_CONFIG_PRIOLVL_SHFT = 0x0,
+};
+
+#define M_BKE_HEALTH_3_CONFIG_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x0000034c)
+enum bimc_m_bke_health_3 {
+ M_BKE_HEALTH_3_CONFIG_RMSK = 0x303,
+ M_BKE_HEALTH_3_CONFIG_AREQPRIO_BMSK = 0x300,
+ M_BKE_HEALTH_3_CONFIG_AREQPRIO_SHFT = 0x8,
+ M_BKE_HEALTH_3_CONFIG_PRIOLVL_BMSK = 0x3,
+ M_BKE_HEALTH_3_CONFIG_PRIOLVL_SHFT = 0x0,
+};
+
+#define BKE_HEALTH_MASK \
+ (M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK |\
+ M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK |\
+ M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK)
+
+#define BKE_HEALTH_VAL(limit, areq, plvl) \
+ ((((limit) << M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT) & \
+ M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK) | \
+ (((areq) << M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT) & \
+ M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK) | \
+ (((plvl) << M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT) & \
+ M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK))
+
+#define MAX_GRANT_PERIOD \
+ (M_BKE_GP_GP_BMSK >> \
+ M_BKE_GP_GP_SHFT)
+
+#define MAX_GC \
+ (M_BKE_GC_GC_BMSK >> \
+ (M_BKE_GC_GC_SHFT + 1))
+
/*
 * In-place divide *a by b via the kernel's do_div(), returning the
 * remainder.  Special case: a non-zero dividend smaller than the
 * divisor is rounded down to zero and 1 (a truncation flag, not the
 * true remainder) is returned.
 */
static int bimc_div(uint64_t *a, uint32_t b)
{
	if ((*a > 0) && (*a < b)) {
		*a = 0;
		return 1;
	}

	return do_div(*a, b);
}
+
/*
 * Turn the Bandwidth Keeper (BKE) on or off for master port @index.
 * Reads the current enable register and skips the write when the
 * enable bit already matches @req.
 */
static void set_bke_en(void __iomem *addr, uint32_t index,
	bool req)
{
	uint32_t old_val, new_val;

	old_val = readl_relaxed(M_BKE_EN_ADDR(addr, index));
	new_val = req << M_BKE_EN_EN_SHFT;
	if ((old_val & M_BKE_EN_RMSK) == (new_val))
		return;	/* already in the requested state */
	writel_relaxed(((old_val & ~(M_BKE_EN_EN_BMSK)) | (new_val &
		M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(addr, index));
	/* Ensure that BKE register is programmed set before returning */
	wmb();
}
+
/*
 * Program one M_BKE_HEALTH_n register (address already offset for the
 * master in @addr) from health level @index of @qmode->rl.qhealth[]:
 * limit_commands, areq_prio and prio_level.  Register bits outside
 * BKE_HEALTH_MASK are preserved after masking with @rmsk.
 */
static void set_health_reg(void __iomem *addr, uint32_t rmsk,
	uint8_t index, struct msm_bus_bimc_qos_mode *qmode)
{
	uint32_t reg_val, val0, val;

	/* Note, addr is already passed with right mas_index */
	reg_val = readl_relaxed(addr) & rmsk;
	val0 = BKE_HEALTH_VAL(qmode->rl.qhealth[index].limit_commands,
		qmode->rl.qhealth[index].areq_prio,
		qmode->rl.qhealth[index].prio_level);
	val = ((reg_val & (~(BKE_HEALTH_MASK))) | (val0 & BKE_HEALTH_MASK));
	writel_relaxed(val, addr);
	/*
	 * Ensure that priority for regulator/limiter modes are
	 * set before returning
	 */
	wmb();
}
+
/*
 * Apply the QoS priority configuration for master @mas_index according
 * to @qmode_sel.  Fixed/regulator/limiter modes program all four BKE
 * health registers (level 3 down to 0) and then enable the BKE; bypass
 * disables the BKE; any other mode is ignored.
 */
static void msm_bus_bimc_set_qos_prio(void __iomem *base,
	uint32_t mas_index, uint8_t qmode_sel,
	struct msm_bus_bimc_qos_mode *qmode)
{
	switch (qmode_sel) {
	case BIMC_QOS_MODE_FIXED:
	case BIMC_QOS_MODE_REGULATOR:
	case BIMC_QOS_MODE_LIMITER:
		set_health_reg(M_BKE_HEALTH_3_CONFIG_ADDR(base,
			mas_index), M_BKE_HEALTH_3_CONFIG_RMSK, 3, qmode);
		set_health_reg(M_BKE_HEALTH_2_CONFIG_ADDR(base,
			mas_index), M_BKE_HEALTH_2_CONFIG_RMSK, 2, qmode);
		set_health_reg(M_BKE_HEALTH_1_CONFIG_ADDR(base,
			mas_index), M_BKE_HEALTH_1_CONFIG_RMSK, 1, qmode);
		set_health_reg(M_BKE_HEALTH_0_CONFIG_ADDR(base,
			mas_index), M_BKE_HEALTH_0_CONFIG_RMSK, 0, qmode);
		set_bke_en(base, mas_index, true);
		break;
	case BIMC_QOS_MODE_BYPASS:
		set_bke_en(base, mas_index, false);
		break;
	default:
		break;
	}
}
+
/*
 * Write the BKE bandwidth registers for master port @mas_index:
 * grant period @gp, grant count @gc, and the high/medium/low
 * thresholds @th/@tm/@tl (medium and low are expected to be negative;
 * see bimc_set_static_qos_bw()).
 *
 * NOTE(review): the BKE is disabled before the update, as the spec
 * requires, but is NOT re-enabled on return — callers must re-enable
 * it via set_bke_en()/msm_bus_bimc_set_qos_prio().  Confirm that every
 * call site does so after this runs.
 */
static void set_qos_bw_regs(void __iomem *baddr, uint32_t mas_index,
	int32_t th, int32_t tm, int32_t tl, uint32_t gp,
	uint32_t gc)
{
	int32_t reg_val, val;
	int32_t bke_reg_val;
	/* thresholds occupy 16-bit fields; int16_t makes the sign truncation explicit */
	int16_t val2;

	/* Disable BKE before writing to registers as per spec */
	bke_reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index));
	writel_relaxed((bke_reg_val & ~(M_BKE_EN_EN_BMSK)),
		M_BKE_EN_ADDR(baddr, mas_index));

	/* Write values of registers calculated */
	/* Grant period */
	reg_val = readl_relaxed(M_BKE_GP_ADDR(baddr, mas_index))
		& M_BKE_GP_RMSK;
	val = gp << M_BKE_GP_GP_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_GP_GP_BMSK)) | (val &
		M_BKE_GP_GP_BMSK)), M_BKE_GP_ADDR(baddr, mas_index));

	/* Grant count */
	reg_val = readl_relaxed(M_BKE_GC_ADDR(baddr, mas_index)) &
		M_BKE_GC_RMSK;
	val = gc << M_BKE_GC_GC_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_GC_GC_BMSK)) | (val &
		M_BKE_GC_GC_BMSK)), M_BKE_GC_ADDR(baddr, mas_index));

	/* Threshold high */
	reg_val = readl_relaxed(M_BKE_THH_ADDR(baddr, mas_index)) &
		M_BKE_THH_RMSK;
	val = th << M_BKE_THH_THRESH_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_THH_THRESH_BMSK)) | (val &
		M_BKE_THH_THRESH_BMSK)), M_BKE_THH_ADDR(baddr, mas_index));

	/* Threshold medium */
	reg_val = readl_relaxed(M_BKE_THM_ADDR(baddr, mas_index)) &
		M_BKE_THM_RMSK;
	val2 = tm << M_BKE_THM_THRESH_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_THM_THRESH_BMSK)) | (val2 &
		M_BKE_THM_THRESH_BMSK)), M_BKE_THM_ADDR(baddr, mas_index));

	/* Threshold low */
	reg_val = readl_relaxed(M_BKE_THL_ADDR(baddr, mas_index)) &
		M_BKE_THL_RMSK;
	val2 = tl << M_BKE_THL_THRESH_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_THL_THRESH_BMSK)) |
		(val2 & M_BKE_THL_THRESH_BMSK)), M_BKE_THL_ADDR(baddr,
		mas_index));

	/* Ensure that all bandwidth register writes have completed
	 * before returning
	 */
	wmb();
}
+
/*
 * Convert the bandwidth vote in @qbw into BKE grant period, grant
 * count and threshold values, and program them for master port @mport.
 *
 * @base:     QoS register base
 * @qos_freq: QoS clock frequency in KHz; 0 means unknown and nothing
 *            is programmed
 * @mport:    master port index
 * @qbw:      bandwidth parameters: bw in bytes/sec, gp in nanoseconds,
 *            thmp in percent.  The computed medium/low thresholds are
 *            written back into qbw->thm / qbw->thl.
 */
static void bimc_set_static_qos_bw(void __iomem *base, unsigned int qos_freq,
	int mport, struct msm_bus_bimc_qos_bw *qbw)
{
	int32_t bw_mbps, thh = 0, thm, thl, gc;
	int32_t gp;
	u64 temp;

	if (qos_freq == 0) {
		MSM_BUS_DBG("No QoS Frequency.\n");
		return;
	}

	if (!(qbw->bw && qbw->gp)) {
		MSM_BUS_DBG("No QoS Bandwidth or Window size\n");
		return;
	}

	/* Convert bandwidth to MBPS */
	temp = qbw->bw;
	bimc_div(&temp, 1000000);
	bw_mbps = temp;

	/* Grant period in clock cycles
	 * Grant period from bandwidth structure
	 * is in nano seconds, QoS freq is in KHz.
	 * Divide by 1000 to get clock cycles.
	 */
	gp = (qos_freq * qbw->gp) / (1000 * NSEC_PER_USEC);

	/* Grant count = BW in MBps * Grant period
	 * in micro seconds
	 */
	gc = bw_mbps * (qbw->gp / NSEC_PER_USEC);
	gc = min(gc, MAX_GC);

	/* Medium threshold = -((Medium Threshold percentage *
	 * Grant count) / 100)
	 */
	thm = -((qbw->thmp * gc) / 100);
	qbw->thm = thm;

	/* Low threshold = -(Grant count) */
	thl = -gc;
	qbw->thl = thl;

	MSM_BUS_DBG("%s: BKE parameters: gp %d, gc %d, thm %d thl %d thh %d",
		__func__, gp, gc, thm, thl, thh);

	set_qos_bw_regs(base, mport, thh, thm, thl, gp, gc);
}
+
/*
 * Throttle or un-throttle the QoS ports of master node @info.
 *
 * When @enable_lim is THROTTLE_ON and @lim_bw is non-zero, the node is
 * switched to limiter mode: health level 0 limits commands, all levels
 * inherit the node's configured priority, and the static BKE bandwidth
 * is reprogrammed to @lim_bw — but only when it differs from the last
 * programmed value cached in node_info->lim_bw.  Otherwise the node's
 * own configured QoS mode is restored (with priorities rebuilt unless
 * it is bypass).  In both cases the resulting priority mode is applied
 * to every QoS port at the end.
 *
 * Returns 0 always; a node without QoS ports is a no-op.
 */
static int msm_bus_bimc_limit_mport(struct msm_bus_node_device_type *info,
	void __iomem *qos_base, uint32_t qos_off,
	uint32_t qos_delta, uint32_t qos_freq,
	int enable_lim, u64 lim_bw)
{
	int mode;
	int i;
	struct msm_bus_bimc_qos_mode qmode = {0};

	if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
		MSM_BUS_DBG("No QoS Ports to limit\n");
		return 0;
	}

	if ((enable_lim == THROTTLE_ON) && lim_bw) {
		mode = BIMC_QOS_MODE_LIMITER;

		/* Only health level 0 limits commands when throttling */
		qmode.rl.qhealth[0].limit_commands = true;
		qmode.rl.qhealth[1].limit_commands = false;
		qmode.rl.qhealth[2].limit_commands = false;
		qmode.rl.qhealth[3].limit_commands = false;
		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
			qmode.rl.qhealth[i].prio_level =
				info->node_info->qos_params.prio_lvl;
			qmode.rl.qhealth[i].areq_prio =
				info->node_info->qos_params.prio_rd;
		}

		for (i = 0; i < info->node_info->num_qports; i++) {
			struct msm_bus_bimc_qos_bw qbw;
			/* If not in fixed mode, update bandwidth */
			if (info->node_info->lim_bw != lim_bw) {
				qbw.ws = info->node_info->qos_params.ws;
				qbw.bw = lim_bw;
				qbw.gp = info->node_info->qos_params.gp;
				qbw.thmp = info->node_info->qos_params.thmp;
				bimc_set_static_qos_bw(qos_base, qos_freq,
					info->node_info->qport[i], &qbw);
			}
		}
		info->node_info->lim_bw = lim_bw;
	} else {
		/* Restore the node's configured mode */
		mode = info->node_info->qos_params.mode;
		if (mode != BIMC_QOS_MODE_BYPASS) {
			for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
				qmode.rl.qhealth[i].prio_level =
					info->node_info->qos_params.prio_lvl;
				qmode.rl.qhealth[i].areq_prio =
					info->node_info->qos_params.prio_rd;
			}
		}
	}

	for (i = 0; i < info->node_info->num_qports; i++)
		msm_bus_bimc_set_qos_prio(qos_base, info->node_info->qport[i],
			mode, &qmode);
	return 0;
}
+
+/*
+ * msm_bus_bimc_update_bw_reg() - report whether a QoS mode requires the
+ * bandwidth registers to be reprogrammed on a vote change.
+ * @mode: BIMC QoS mode of the node
+ *
+ * Only the limiter and regulator modes consume the BKE bandwidth
+ * registers; all other modes leave them untouched.
+ */
+static bool msm_bus_bimc_update_bw_reg(int mode)
+{
+	return (mode == BIMC_QOS_MODE_LIMITER) ||
+		(mode == BIMC_QOS_MODE_REGULATOR);
+}
+
+/*
+ * msm_bus_bimc_qos_init() - program the initial QoS priority/mode settings
+ * for every QoS port of a BIMC node, based on its static qos_params.
+ * @info: bus node to initialise
+ * @qos_base: ioremapped QoS register base (qos_off/qos_delta/qos_freq are
+ *	accepted for interface symmetry but not used here)
+ *
+ * Always returns 0; nodes without QoS ports are skipped.
+ */
+static int msm_bus_bimc_qos_init(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	struct msm_bus_bimc_qos_mode qmode = {0};
+	int i;
+
+	if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
+		MSM_BUS_DBG("No QoS Ports to init\n");
+		return 0;
+	}
+
+	switch (info->node_info->qos_params.mode) {
+	case BIMC_QOS_MODE_LIMITER:
+		/* Only health level 0 issues limit commands. */
+		qmode.rl.qhealth[0].limit_commands = true;
+		qmode.rl.qhealth[1].limit_commands = false;
+		qmode.rl.qhealth[2].limit_commands = false;
+		qmode.rl.qhealth[3].limit_commands = false;
+		/* fallthrough - limiter shares the priority setup below */
+	case BIMC_QOS_MODE_FIXED:
+	case BIMC_QOS_MODE_REGULATOR:
+		/* Fixed and regulator are handled identically for now. */
+		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+			qmode.rl.qhealth[i].prio_level =
+				info->node_info->qos_params.prio_lvl;
+			qmode.rl.qhealth[i].areq_prio =
+				info->node_info->qos_params.prio_rd;
+		}
+		break;
+	default:
+		break;
+	}
+
+	for (i = 0; i < info->node_info->num_qports; i++)
+		msm_bus_bimc_set_qos_prio(qos_base, info->node_info->qport[i],
+			info->node_info->qos_params.mode, &qmode);
+
+	return 0;
+}
+
+/*
+ * msm_bus_bimc_set_bw() - apply the aggregated bandwidth vote of a
+ * limiter-mode node to its BIMC QoS ports.
+ * @dev: bus node whose active-context sum_ab vote is applied
+ * @qos_base: ioremapped QoS register base (qos_off/qos_delta unused here)
+ * @qos_freq: QoS clock in KHz, forwarded to the BKE programming helper
+ *
+ * Nodes that are not in limiter mode are left untouched. A zero vote
+ * demotes the ports to fixed mode instead of programming a zero limit.
+ * Always returns 0.
+ */
+static int msm_bus_bimc_set_bw(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq)
+{
+	struct msm_bus_bimc_qos_bw qbw;
+	struct msm_bus_bimc_qos_mode qmode = {0};
+	int i;
+	int64_t bw = 0;
+	int ret = 0;
+	struct msm_bus_node_info_type *info = dev->node_info;
+	int mode;
+
+	if (info && info->num_qports &&
+		((info->qos_params.mode == BIMC_QOS_MODE_LIMITER))) {
+		/* Spread the aggregate vote evenly across the QoS ports. */
+		bw = msm_bus_div64(info->num_qports,
+				dev->node_bw[ACTIVE_CTX].sum_ab);
+
+		MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n",
+				info->id, bw);
+
+		if (!info->qport) {
+			MSM_BUS_DBG("No qos ports to update!\n");
+			goto exit_set_bw;
+		}
+
+		/* NOTE(review): qbw.ws is left uninitialised here; it appears
+		 * unused by bimc_set_static_qos_bw - confirm.
+		 */
+		qbw.bw = bw + info->qos_params.bw_buffer;
+
+		/* Default to gp of 5us */
+		qbw.gp = (info->qos_params.gp ?
+				info->qos_params.gp : 5000);
+		/* Default to thmp of 50% */
+		qbw.thmp = (info->qos_params.thmp ?
+				info->qos_params.thmp : 50);
+		/*
+		 * If the BW vote is 0 then set the QoS mode to
+		 * Fixed/0/0.
+		 */
+		if (bw) {
+			/* Only health level 0 issues limit commands. */
+			qmode.rl.qhealth[0].limit_commands = true;
+			qmode.rl.qhealth[1].limit_commands = false;
+			qmode.rl.qhealth[2].limit_commands = false;
+			qmode.rl.qhealth[3].limit_commands = false;
+			mode = info->qos_params.mode;
+		} else {
+			mode = BIMC_QOS_MODE_FIXED;
+		}
+
+		for (i = 0; i < info->num_qports; i++) {
+			msm_bus_bimc_set_qos_prio(qos_base,
+				info->qport[i], mode, &qmode);
+			if (bw)
+				bimc_set_static_qos_bw(qos_base, qos_freq,
+					info->qport[i], &qbw);
+		}
+	}
+exit_set_bw:
+	return ret;
+}
+
+/*
+ * msm_bus_bimc_set_ops() - install the BIMC implementations of the fabric
+ * QoS operations on a bus device.
+ * @bus_dev: fabric bus device whose noc_ops table is populated
+ *
+ * Returns 0 on success, -ENODEV if @bus_dev or its fabric device is
+ * missing (the original code only checked @bus_dev and would have
+ * dereferenced a NULL fabdev).
+ */
+int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	if (!bus_dev || !bus_dev->fabdev)
+		return -ENODEV;
+	bus_dev->fabdev->noc_ops.qos_init = msm_bus_bimc_qos_init;
+	bus_dev->fabdev->noc_ops.set_bw = msm_bus_bimc_set_bw;
+	bus_dev->fabdev->noc_ops.limit_mport = msm_bus_bimc_limit_mport;
+	bus_dev->fabdev->noc_ops.update_bw_reg =
+					msm_bus_bimc_update_bw_reg;
+	return 0;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_client_api.c b/drivers/soc/qcom/msm_bus/msm_bus_client_api.c
new file mode 100644
index 0000000..d4443f3
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_client_api.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+
+struct msm_bus_arb_ops arb_ops;
+
+/**
+ * msm_bus_scale_register_client() - Register the clients with the msm bus
+ * driver
+ * @pdata: Platform data of the client, containing src, dest, ab, ib.
+ * Return non-zero value in case of success, 0 in case of failure.
+ *
+ * Client data contains the vectors specifying arbitrated bandwidth (ab)
+ * and instantaneous bandwidth (ib) requested between a particular
+ * src and dest.
+ */
+uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
+{
+	/* 0 is the "registration failed" handle for this legacy API. */
+	if (!arb_ops.register_client) {
+		pr_err("%s: Bus driver not ready.\n", __func__);
+		return 0;
+	}
+
+	return arb_ops.register_client(pdata);
+}
+EXPORT_SYMBOL(msm_bus_scale_register_client);
+
+/**
+ * msm_bus_scale_client_update_request() - Update the request for bandwidth
+ * from a particular client
+ *
+ * cl: Handle to the client
+ * index: Index into the vector, to which the bw and clock values need to be
+ * updated
+ */
+int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index)
+{
+	/* Defer the caller's probe until the arbiter has registered. */
+	if (!arb_ops.update_request) {
+		pr_err("%s: Bus driver not ready.\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	return arb_ops.update_request(cl, index);
+}
+EXPORT_SYMBOL(msm_bus_scale_client_update_request);
+
+/**
+ * msm_bus_scale_client_update_context() - Update the context for a client
+ * cl: Handle to the client
+ * active_only: Bool to indicate dual context or active-only context.
+ * ctx_idx: Voting index to be used when switching contexts.
+ */
+/*
+ * Dispatch to the arbiter's update_context hook, deferring probe when the
+ * bus driver has not registered yet. Logs the not-ready case like every
+ * other entry point in this file (the original silently returned).
+ */
+int msm_bus_scale_client_update_context(uint32_t cl, bool active_only,
+							unsigned int ctx_idx)
+{
+	if (arb_ops.update_context)
+		return arb_ops.update_context(cl, active_only, ctx_idx);
+
+	pr_err("%s: Bus driver not ready.\n", __func__);
+	return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_client_update_context);
+
+/**
+ * msm_bus_scale_unregister_client() - Unregister the client from the bus driver
+ * @cl: Handle to the client
+ */
+void msm_bus_scale_unregister_client(uint32_t cl)
+{
+	/* Nothing to undo if the arbiter never came up; just log it. */
+	if (!arb_ops.unregister_client) {
+		pr_err("%s: Bus driver not ready.\n", __func__);
+		return;
+	}
+
+	arb_ops.unregister_client(cl);
+}
+EXPORT_SYMBOL(msm_bus_scale_unregister_client);
+
+/**
+ * msm_bus_scale_register() - Register the clients with the msm bus
+ * driver
+ *
+ * @mas: Master ID
+ * @slv: Slave ID
+ * @name: descriptive name for this client
+ * @active_only: Whether or not this bandwidth vote should only be
+ * effective while the application processor is active.
+ *
+ * Client data contains the vectors specifying arbitrated bandwidth (ab)
+ * and instantaneous bandwidth (ib) requested between a particular
+ * src and dest.
+ */
+struct msm_bus_client_handle*
+msm_bus_scale_register(uint32_t mas, uint32_t slv, char *name, bool active_only)
+{
+	/* Callers must IS_ERR()-check the handle; defer until ready. */
+	if (!arb_ops.register_cl) {
+		pr_err("%s: Bus driver not ready.\n", __func__);
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+
+	return arb_ops.register_cl(mas, slv, name, active_only);
+}
+EXPORT_SYMBOL(msm_bus_scale_register);
+
+/**
+ * msm_bus_scale_client_update_bw() - Update the request for bandwidth
+ * from a particular client
+ *
+ * @cl: Handle to the client
+ * @ab: Arbitrated bandwidth being requested
+ * @ib: Instantaneous bandwidth being requested
+ */
+int msm_bus_scale_update_bw(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+	/*
+	 * Check the callback that is actually invoked: the original tested
+	 * arb_ops.update_request, so a partially populated arb_ops could
+	 * pass the check and then dereference a NULL update_bw.
+	 */
+	if (arb_ops.update_bw)
+		return arb_ops.update_bw(cl, ab, ib);
+	pr_err("%s: Bus driver not ready.\n", __func__);
+	return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_update_bw);
+
+/**
+ * msm_bus_scale_change_context() - Update the context for a particular client
+ * cl: Handle to the client
+ * act_ab: The average bandwidth(AB) in Bytes/s to be used in active context.
+ * act_ib: The instantaneous bandwidth(IB) in Bytes/s to be used in active
+ * context.
+ * dual_ib: The average bandwidth(AB) in Bytes/s to be used in dual context.
+ * dual_ab: The instantaneous bandwidth(IB) in Bytes/s to be used in dual
+ * context.
+ */
+int
+msm_bus_scale_update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab,
+				u64 act_ib, u64 dual_ib, u64 dual_ab)
+{
+	/*
+	 * Check the callback that is actually invoked: the original tested
+	 * arb_ops.update_context, so update_bw_context could be NULL here.
+	 * NOTE(review): the dual_ab/dual_ib argument order below does not
+	 * match the ops prototype's parameter names - kept as-is since the
+	 * arbiter implementation may rely on it; confirm against the callee.
+	 */
+	if (arb_ops.update_bw_context)
+		return arb_ops.update_bw_context(cl, act_ab, act_ib,
+						dual_ab, dual_ib);
+
+	pr_err("%s: Bus driver not ready.\n", __func__);
+	return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_update_bw_context);
+
+/**
+ * msm_bus_scale_unregister() - Update the request for bandwidth
+ * from a particular client
+ *
+ * cl: Handle to the client
+ */
+void msm_bus_scale_unregister(struct msm_bus_client_handle *cl)
+{
+	/* Nothing to release if the arbiter never registered its ops. */
+	if (!arb_ops.unregister) {
+		pr_err("%s: Bus driver not ready.\n", __func__);
+		return;
+	}
+
+	arb_ops.unregister(cl);
+}
+EXPORT_SYMBOL(msm_bus_scale_unregister);
+
+/**
+ * msm_bus_scale_query_tcs_cmd() - Query for a list of TCS commands for
+ * an aggregated votes of paths from a single usecase.
+ *
+ * tcs_usecase: pointer to client allocated memory blob
+ * cl: Handle to the client
+ * index: Index into the vector, to which the bw and clock values need to be
+ * updated
+ */
+int msm_bus_scale_query_tcs_cmd(struct msm_bus_tcs_usecase *tcs_usecase,
+				uint32_t cl, unsigned int index)
+{
+	/* Defer until the arbiter has populated its query hook. */
+	if (!arb_ops.query_usecase) {
+		pr_err("%s: Bus driver not ready.\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	return arb_ops.query_usecase(tcs_usecase, cl, index);
+}
+EXPORT_SYMBOL(msm_bus_scale_query_tcs_cmd);
+
+/**
+ * msm_bus_scale_query_tcs_cmd_all() - Query for a list of TCS commands for
+ * an aggregated vote of paths for all usecases registered by client
+ *
+ * tcs_handle: pointer to client allocated memory blob
+ * cl: Handle to the client
+ *
+ */
+int msm_bus_scale_query_tcs_cmd_all(struct msm_bus_tcs_handle *tcs_handle,
+					uint32_t cl)
+{
+	/*
+	 * Check the callback that is actually invoked: the original tested
+	 * arb_ops.query_usecase, so a partially populated arb_ops could
+	 * dereference a NULL query_usecase_all.
+	 */
+	if (arb_ops.query_usecase_all)
+		return arb_ops.query_usecase_all(tcs_handle, cl);
+	pr_err("%s: Bus driver not ready.\n",
+		__func__);
+	return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_query_tcs_cmd_all);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_core.c b/drivers/soc/qcom/msm_bus/msm_bus_core.c
new file mode 100644
index 0000000..6dfb3a0
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_core.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/clk.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+
+/* Count of registered fabric devices; maintained by register/unregister. */
+static atomic_t num_fab = ATOMIC_INIT(0);
+
+/* Return the number of fabrics currently registered on the msm bus. */
+int msm_bus_get_num_fab(void)
+{
+	return atomic_read(&num_fab);
+}
+
+/*
+ * msm_bus_device_match() - bus_find_device() callback matching a fabric
+ * device by its integer fabric id.
+ * @dev: candidate device on msm_bus_type
+ * @id: pointer to the fabric id being searched for
+ *
+ * Returns nonzero when @dev's fabric id equals *@id.
+ */
+int msm_bus_device_match(struct device *dev, void *id)
+{
+	struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev);
+
+	/* NOTE(review): container_of() of a valid dev cannot yield NULL, so
+	 * this check looks purely defensive - confirm before removing.
+	 */
+	if (!fabdev) {
+		MSM_BUS_WARN("Fabric %p returning 0\n", fabdev);
+		return 0;
+	}
+	return fabdev->id == *(int *)id;
+}
+
+/*
+ * Empty release callback: fabric devices are embedded in longer-lived
+ * structures freed elsewhere, and setting a release silences the driver
+ * core's "does not have a release() function" warning.
+ */
+static void msm_bus_release(struct device *device)
+{
+}
+
+/* Virtual bus type all msm-bus fabric devices are registered on. */
+struct bus_type msm_bus_type = {
+	.name = "msm-bus-type",
+};
+EXPORT_SYMBOL(msm_bus_type);
+
+/**
+ * msm_bus_get_fabric_device() - This function is used to search for
+ * the fabric device on the bus
+ * @fabid: Fabric id
+ * Function returns: Pointer to the fabric device
+ */
+struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid)
+{
+	struct device *dev;
+
+	/* Walk msm_bus_type comparing each device's fabric id. */
+	dev = bus_find_device(&msm_bus_type, NULL, (void *)&fabid,
+				msm_bus_device_match);
+	if (!dev)
+		return NULL;
+
+	return to_msm_bus_fabric_device(dev);
+}
+
+/**
+ * msm_bus_fabric_device_register() - Registers a fabric on msm bus
+ * @fabdev: Fabric device to be registered
+ */
+int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabdev)
+{
+	int ret = 0;
+
+	fabdev->dev.bus = &msm_bus_type;
+	fabdev->dev.release = msm_bus_release;
+	/*
+	 * Pass the name through "%s": dev_set_name() takes a printf format,
+	 * and feeding fabdev->name directly would interpret any '%' in the
+	 * platform-data name as a conversion (format-string bug).
+	 */
+	ret = dev_set_name(&fabdev->dev, "%s", fabdev->name);
+	if (ret) {
+		MSM_BUS_ERR("error setting dev name\n");
+		goto err;
+	}
+
+	ret = device_register(&fabdev->dev);
+	if (ret < 0) {
+		MSM_BUS_ERR("error registering device%d %s\n",
+				ret, fabdev->name);
+		/*
+		 * Per the driver core contract, a failed device_register()
+		 * must be balanced with put_device(), not kfree().
+		 */
+		put_device(&fabdev->dev);
+		goto err;
+	}
+	atomic_inc(&num_fab);
+err:
+	return ret;
+}
+
+/**
+ * msm_bus_fabric_device_unregister() - Unregisters the fabric
+ * devices from the msm bus
+ */
+void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabdev)
+{
+	/* Drop the device from the bus, then the global fabric count. */
+	device_unregister(&fabdev->dev);
+	atomic_dec(&num_fab);
+}
+
+/* Module teardown: remove the virtual bus type. */
+static void __exit msm_bus_exit(void)
+{
+	bus_unregister(&msm_bus_type);
+}
+
+/*
+ * Register the msm-bus bus type. Runs at postcore_initcall time so the
+ * bus exists before fabric/platform drivers try to register devices on it.
+ */
+static int __init msm_bus_init(void)
+{
+	int retval = 0;
+
+	retval = bus_register(&msm_bus_type);
+	if (retval)
+		MSM_BUS_ERR("bus_register error! %d\n",
+			retval);
+	return retval;
+}
+postcore_initcall(msm_bus_init);
+module_exit(msm_bus_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:msm_bus");
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_core.h b/drivers/soc/qcom/msm_bus/msm_bus_core.h
new file mode 100644
index 0000000..0d58c76
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_core.h
@@ -0,0 +1,414 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_CORE_H
+#define _ARCH_ARM_MACH_MSM_BUS_CORE_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/radix-tree.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+
+#define MSM_BUS_DBG(msg, ...) \
+ pr_debug(msg, ## __VA_ARGS__)
+#define MSM_BUS_ERR(msg, ...) \
+ pr_err(msg, ## __VA_ARGS__)
+#define MSM_BUS_WARN(msg, ...) \
+ pr_warn(msg, ## __VA_ARGS__)
+#define MSM_FAB_ERR(msg, ...) \
+ dev_err(&fabric->fabdev.dev, msg, ## __VA_ARGS__)
+
+#define IS_MASTER_VALID(mas) \
+ (((mas >= MSM_BUS_MASTER_FIRST) && (mas <= MSM_BUS_MASTER_LAST)) \
+ ? 1 : 0)
+#define IS_SLAVE_VALID(slv) \
+ (((slv >= MSM_BUS_SLAVE_FIRST) && (slv <= MSM_BUS_SLAVE_LAST)) ? 1 : 0)
+
+#define INTERLEAVED_BW(fab_pdata, bw, ports) \
+ ((fab_pdata->il_flag) ? ((bw < 0) \
+ ? -msm_bus_div64((ports), (-bw)) : msm_bus_div64((ports), (bw))) : (bw))
+#define INTERLEAVED_VAL(fab_pdata, n) \
+ ((fab_pdata->il_flag) ? (n) : 1)
+#define KBTOB(a) (a * 1000ULL)
+#define MAX_REG_NAME (50)
+
+enum msm_bus_dbg_op_type {
+ MSM_BUS_DBG_UNREGISTER = -2,
+ MSM_BUS_DBG_REGISTER,
+ MSM_BUS_DBG_OP = 1,
+};
+
+enum msm_bus_hw_sel {
+ MSM_BUS_RPM = 0,
+ MSM_BUS_NOC,
+ MSM_BUS_BIMC,
+};
+
+/*
+ * Arbiter operations table, filled in by the active bus-arbitration
+ * backend; the msm_bus_scale_* client API wrappers dispatch through it
+ * and return -EPROBE_DEFER while entries are still NULL.
+ */
+struct msm_bus_arb_ops {
+	/* legacy handle-by-id client API */
+	uint32_t (*register_client)(struct msm_bus_scale_pdata *pdata);
+	int (*update_request)(uint32_t cl, unsigned int index);
+	int (*update_context)(uint32_t cl, bool active_only,
+			unsigned int ctx_idx);
+	void (*unregister_client)(uint32_t cl);
+	/* handle-by-pointer client API */
+	struct msm_bus_client_handle*
+		(*register_cl)(uint32_t mas, uint32_t slv, char *name,
+			bool active_only);
+	int (*update_bw)(struct msm_bus_client_handle *cl, u64 ab, u64 ib);
+	void (*unregister)(struct msm_bus_client_handle *cl);
+	int (*update_bw_context)(struct msm_bus_client_handle *cl, u64 act_ab,
+		u64 act_ib, u64 dual_ib, u64 dual_ab);
+	/* TCS command queries for RPMh-style aggregation */
+	int (*query_usecase)(struct msm_bus_tcs_usecase *tcs_usecase,
+		uint32_t cl, unsigned int index);
+	int (*query_usecase_all)(struct msm_bus_tcs_handle *tcs_handle,
+		uint32_t cl);
+
+};
+
+enum {
+ SLAVE_NODE,
+ MASTER_NODE,
+ CLK_NODE,
+ NR_LIM_NODE,
+};
+
+
+extern struct bus_type msm_bus_type;
+extern struct msm_bus_arb_ops arb_ops;
+extern void msm_bus_arb_setops_legacy(struct msm_bus_arb_ops *arb_ops);
+
+struct msm_bus_node_info {
+ unsigned int id;
+ unsigned int priv_id;
+ unsigned int mas_hw_id;
+ unsigned int slv_hw_id;
+ int gateway;
+ int *masterp;
+ int *qport;
+ int num_mports;
+ int *slavep;
+ int num_sports;
+ int *tier;
+ int num_tiers;
+ int ahb;
+ int hw_sel;
+ const char *slaveclk[NUM_CTX];
+ const char *memclk[NUM_CTX];
+ const char *iface_clk_node;
+ unsigned int buswidth;
+ unsigned int ws;
+ unsigned int mode;
+ unsigned int perm_mode;
+ unsigned int prio_lvl;
+ unsigned int prio_rd;
+ unsigned int prio_wr;
+ unsigned int prio1;
+ unsigned int prio0;
+ unsigned int num_thresh;
+ u64 *th;
+ u64 cur_lim_bw;
+ unsigned int mode_thresh;
+ bool dual_conf;
+ u64 *bimc_bw;
+ bool nr_lim;
+ u32 ff;
+ bool rt_mas;
+ u32 bimc_gp;
+ u32 bimc_thmp;
+ u64 floor_bw;
+ const char *name;
+};
+
+struct path_node {
+ uint64_t clk[NUM_CTX];
+ uint64_t bw[NUM_CTX];
+ uint64_t *sel_clk;
+ uint64_t *sel_bw;
+ int next;
+};
+
+struct msm_bus_link_info {
+ uint64_t clk[NUM_CTX];
+ uint64_t *sel_clk;
+ uint64_t memclk;
+ int64_t bw[NUM_CTX];
+ int64_t *sel_bw;
+ int *tier;
+ int num_tiers;
+};
+
+struct nodeclk {
+ struct clk *clk;
+ struct regulator *reg;
+ uint64_t rate;
+ bool dirty;
+ bool enable_only_clk;
+ bool setrate_only_clk;
+ bool enable;
+ char reg_name[MAX_REG_NAME];
+};
+
+struct msm_bus_inode_info {
+ struct msm_bus_node_info *node_info;
+ uint64_t max_bw;
+ uint64_t max_clk;
+ uint64_t cur_lim_bw;
+ uint64_t cur_prg_bw;
+ struct msm_bus_link_info link_info;
+ int num_pnodes;
+ struct path_node *pnode;
+ int commit_index;
+ struct nodeclk nodeclk[NUM_CTX];
+ struct nodeclk memclk[NUM_CTX];
+ struct nodeclk iface_clk;
+ void *hw_data;
+};
+
+struct msm_bus_node_hw_info {
+ bool dirty;
+ unsigned int hw_id;
+ uint64_t bw;
+};
+
+struct msm_bus_hw_algorithm {
+ int (*allocate_commit_data)(struct msm_bus_fabric_registration
+ *fab_pdata, void **cdata, int ctx);
+ void *(*allocate_hw_data)(struct platform_device *pdev,
+ struct msm_bus_fabric_registration *fab_pdata);
+ void (*node_init)(void *hw_data, struct msm_bus_inode_info *info);
+ void (*free_commit_data)(void *cdata);
+ void (*update_bw)(struct msm_bus_inode_info *hop,
+ struct msm_bus_inode_info *info,
+ struct msm_bus_fabric_registration *fab_pdata,
+ void *sel_cdata, int *master_tiers,
+ int64_t add_bw);
+ void (*fill_cdata_buffer)(int *curr, char *buf, const int max_size,
+ void *cdata, int nmasters, int nslaves, int ntslaves);
+ int (*commit)(struct msm_bus_fabric_registration
+ *fab_pdata, void *hw_data, void **cdata);
+ int (*port_unhalt)(uint32_t haltid, uint8_t mport);
+ int (*port_halt)(uint32_t haltid, uint8_t mport);
+ void (*config_master)(struct msm_bus_fabric_registration *fab_pdata,
+ struct msm_bus_inode_info *info,
+ uint64_t req_clk, uint64_t req_bw);
+ void (*config_limiter)(struct msm_bus_fabric_registration *fab_pdata,
+ struct msm_bus_inode_info *info);
+ bool (*update_bw_reg)(int mode);
+};
+
+struct msm_bus_fabric_device {
+ int id;
+ const char *name;
+ struct device dev;
+ const struct msm_bus_fab_algorithm *algo;
+ const struct msm_bus_board_algorithm *board_algo;
+ struct msm_bus_hw_algorithm hw_algo;
+ int visited;
+ int num_nr_lim;
+ u64 nr_lim_thresh;
+ u32 eff_fact;
+};
+#define to_msm_bus_fabric_device(d) container_of(d, \
+ struct msm_bus_fabric_device, d)
+
+struct msm_bus_fabric {
+ struct msm_bus_fabric_device fabdev;
+ int ahb;
+ void *cdata[NUM_CTX];
+ bool arb_dirty;
+ bool clk_dirty;
+ struct radix_tree_root fab_tree;
+ int num_nodes;
+ struct list_head gateways;
+ struct msm_bus_inode_info info;
+ struct msm_bus_fabric_registration *pdata;
+ void *hw_data;
+};
+#define to_msm_bus_fabric(d) container_of(d, \
+ struct msm_bus_fabric, d)
+
+
+struct msm_bus_fab_algorithm {
+ int (*update_clks)(struct msm_bus_fabric_device *fabdev,
+ struct msm_bus_inode_info *pme, int index,
+ uint64_t curr_clk, uint64_t req_clk,
+ uint64_t bwsum, int flag, int ctx,
+ unsigned int cl_active_flag);
+ int (*port_halt)(struct msm_bus_fabric_device *fabdev, int portid);
+ int (*port_unhalt)(struct msm_bus_fabric_device *fabdev, int portid);
+ int (*commit)(struct msm_bus_fabric_device *fabdev);
+ struct msm_bus_inode_info *(*find_node)(struct msm_bus_fabric_device
+ *fabdev, int id);
+ struct msm_bus_inode_info *(*find_gw_node)(struct msm_bus_fabric_device
+ *fabdev, int id);
+ struct list_head *(*get_gw_list)(struct msm_bus_fabric_device *fabdev);
+ void (*update_bw)(struct msm_bus_fabric_device *fabdev, struct
+ msm_bus_inode_info * hop, struct msm_bus_inode_info *info,
+ int64_t add_bw, int *master_tiers, int ctx);
+ void (*config_master)(struct msm_bus_fabric_device *fabdev,
+ struct msm_bus_inode_info *info, uint64_t req_clk,
+ uint64_t req_bw);
+ void (*config_limiter)(struct msm_bus_fabric_device *fabdev,
+ struct msm_bus_inode_info *info);
+};
+
+struct msm_bus_board_algorithm {
+ int board_nfab;
+ void (*assign_iids)(struct msm_bus_fabric_registration *fabreg,
+ int fabid);
+ int (*get_iid)(int id);
+};
+
+/**
+ * Used to store the list of fabrics and other info to be
+ * maintained outside the fabric structure.
+ * Used while calculating path, and to find fabric ptrs
+ */
+struct msm_bus_fabnodeinfo {
+ struct list_head list;
+ struct msm_bus_inode_info *info;
+};
+
+struct msm_bus_client {
+ int id;
+ struct msm_bus_scale_pdata *pdata;
+ int *src_pnode;
+ int curr;
+ struct device **src_devs;
+};
+
+uint64_t msm_bus_div64(uint64_t num, unsigned int base);
+int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabric);
+void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabric);
+struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid);
+int msm_bus_get_num_fab(void);
+
+
+int msm_bus_hw_fab_init(struct msm_bus_fabric_registration *pdata,
+ struct msm_bus_hw_algorithm *hw_algo);
+void msm_bus_board_init(struct msm_bus_fabric_registration *pdata);
+#if defined(CONFIG_MSM_RPM_SMD)
+int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+ struct msm_bus_hw_algorithm *hw_algo);
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+ *fab_pdata, void *hw_data, void **cdata);
+void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size,
+ void *cdata, int nmasters, int nslaves, int ntslaves);
+#else
+static inline int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+ struct msm_bus_hw_algorithm *hw_algo)
+{
+ return 0;
+}
+static inline int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+ *fab_pdata, void *hw_data, void **cdata)
+{
+ return 0;
+}
+static inline void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf,
+ const int max_size, void *cdata, int nmasters, int nslaves,
+ int ntslaves)
+{
+}
+#endif
+
+int msm_bus_noc_hw_init(struct msm_bus_fabric_registration *pdata,
+ struct msm_bus_hw_algorithm *hw_algo);
+int msm_bus_bimc_hw_init(struct msm_bus_fabric_registration *pdata,
+ struct msm_bus_hw_algorithm *hw_algo);
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_QCOM_BUS_SCALING)
+void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index,
+ uint32_t cl);
+void msm_bus_dbg_commit_data(const char *fabname, void *cdata,
+ int nmasters, int nslaves, int ntslaves, int op);
+int msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata);
+int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+ u64 ab, u64 ib);
+void msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata);
+
+#else
+static inline void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata,
+ int index, uint32_t cl)
+{
+}
+static inline void msm_bus_dbg_commit_data(const char *fabname,
+ void *cdata, int nmasters, int nslaves, int ntslaves,
+ int op)
+{
+}
+static inline void msm_bus_dbg_remove_client
+ (const struct msm_bus_client_handle *pdata)
+{
+}
+
+static inline int
+msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+ u64 ab, u64 ib)
+{
+ return 0;
+}
+
+static inline int
+msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_CORESIGHT
+int msmbus_coresight_init(struct platform_device *pdev);
+void msmbus_coresight_remove(struct platform_device *pdev);
+int msmbus_coresight_init_adhoc(struct platform_device *pdev,
+ struct device_node *of_node);
+void msmbus_coresight_remove_adhoc(struct platform_device *pdev);
+#else
+static inline int msmbus_coresight_init(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static inline void msmbus_coresight_remove(struct platform_device *pdev)
+{
+}
+
+static inline int msmbus_coresight_init_adhoc(struct platform_device *pdev,
+ struct device_node *of_node)
+{
+ return 0;
+}
+
+static inline void msmbus_coresight_remove_adhoc(struct platform_device *pdev)
+{
+}
+#endif
+
+
+#ifdef CONFIG_OF
+void msm_bus_of_get_nfab(struct platform_device *pdev,
+ struct msm_bus_fabric_registration *pdata);
+struct msm_bus_fabric_registration
+ *msm_bus_of_get_fab_data(struct platform_device *pdev);
+static inline void msm_bus_board_set_nfab(struct msm_bus_fabric_registration
+ *pdata, int nfab)
+{
+}
+#else
+void msm_bus_board_set_nfab(struct msm_bus_fabric_registration *pdata,
+ int nfab);
+static inline void msm_bus_of_get_nfab(struct platform_device *pdev,
+ struct msm_bus_fabric_registration *pdata)
+{
+}
+
+static inline struct msm_bus_fabric_registration
+ *msm_bus_of_get_fab_data(struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif
+
+#endif /*_ARCH_ARM_MACH_MSM_BUS_CORE_H*/
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
new file mode 100644
index 0000000..b6abc56
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
@@ -0,0 +1,974 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2010-2012, 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/rtmutex.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/hrtimer.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_msm_bus.h>
+
+#define MAX_BUFF_SIZE 4096	/* per-client/per-fabric log buffer size */
+#define FILL_LIMIT 128		/* headroom before wrapping the log buffer */
+
+/* debugfs directory dentries created in msm_bus_debugfs_init() */
+static struct dentry *clients;
+static struct dentry *dir;
+/* protects fabdata_list */
+static DEFINE_MUTEX(msm_bus_dbg_fablist_lock);
+/* protects cl_list; rt_mutex since vote paths may run at RT priority */
+static DEFINE_RT_MUTEX(msm_bus_dbg_cllist_lock);
+/* State of the single run-time "shell" test client. */
+struct msm_bus_dbg_state {
+	uint32_t cl;		/* handle returned by scale-client registration */
+	uint8_t enable;		/* nonzero once the user enables the client */
+	uint8_t current_index;	/* usecase index last voted (toggles 1/2) */
+} clstate;
+
+/* Per-client debugfs record; exactly one of pdata/handle identifies it. */
+struct msm_bus_cldata {
+	const struct msm_bus_scale_pdata *pdata;	/* legacy clients */
+	const struct msm_bus_client_handle *handle;	/* adhoc clients */
+	int index;		/* last usecase index voted, -1 if none */
+	uint32_t clid;		/* legacy client id (0 for adhoc clients) */
+	int size;		/* valid bytes in buffer[] */
+	struct dentry *file;	/* debugfs file exposing buffer[] */
+	struct list_head list;	/* link in cl_list */
+	char buffer[MAX_BUFF_SIZE];	/* human-readable vote history */
+};
+
+/* Per-fabric commit-data record, keyed by fabric name. */
+struct msm_bus_fab_list {
+	const char *name;	/* fabric name (not copied; caller owns) */
+	int size;		/* valid bytes in buffer[] */
+	struct dentry *file;	/* debugfs file exposing buffer[] */
+	struct list_head list;	/* link in fabdata_list */
+	char buffer[MAX_BUFF_SIZE];	/* human-readable commit history */
+};
+
+/**
+ * The following structures and functions are used for
+ * the test-client which can be created at run-time.
+ */
+
+/* Single-path vectors backing the three shell-client usecases below. */
+static struct msm_bus_vectors init_vectors[1];
+static struct msm_bus_vectors current_vectors[1];
+static struct msm_bus_vectors requested_vectors[1];
+
+/* Usecase 0 = initial, 1 = currently applied, 2 = newly requested. */
+static struct msm_bus_paths shell_client_usecases[] = {
+	{
+		.num_paths = ARRAY_SIZE(init_vectors),
+		.vectors = init_vectors,
+	},
+	{
+		.num_paths = ARRAY_SIZE(current_vectors),
+		.vectors = current_vectors,
+	},
+	{
+		.num_paths = ARRAY_SIZE(requested_vectors),
+		.vectors = requested_vectors,
+	},
+};
+
+/* Platform data for the run-time "test-client" driven from debugfs. */
+static struct msm_bus_scale_pdata shell_client = {
+	.usecase = shell_client_usecases,
+	.num_usecases = ARRAY_SIZE(shell_client_usecases),
+	.name = "test-client",
+};
+
+/*
+ * Reset all three shell-client vectors to "no route selected"
+ * (src/dst = -1, zero bandwidth) and disable the shell client.
+ */
+static void msm_bus_dbg_init_vectors(void)
+{
+	struct msm_bus_vectors *vecs[] = {
+		init_vectors, current_vectors, requested_vectors,
+	};
+	int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(vecs); idx++) {
+		vecs[idx]->src = -1;
+		vecs[idx]->dst = -1;
+		vecs[idx]->ab = 0;
+		vecs[idx]->ib = 0;
+	}
+
+	clstate.enable = 0;
+	clstate.current_index = 0;
+}
+
+/*
+ * Apply the latest shell-client vote.  Alternates between usecase
+ * indices 1 and 2 so each write is seen as a new request even when the
+ * bandwidth values are unchanged; usecase 1's vectors are refreshed from
+ * the requested values before voting it.  Skipped unless the user has
+ * set the enable bit.
+ */
+static int msm_bus_dbg_update_cl_request(uint32_t cl)
+{
+	int ret = 0;
+
+	if (clstate.current_index < 2)
+		clstate.current_index = 2;
+	else {
+		clstate.current_index = 1;
+		current_vectors[0].ab = requested_vectors[0].ab;
+		current_vectors[0].ib = requested_vectors[0].ib;
+	}
+
+	if (clstate.enable) {
+		MSM_BUS_DBG("Updating request for shell client, index: %d\n",
+			clstate.current_index);
+		ret = msm_bus_scale_client_update_request(clstate.cl,
+			clstate.current_index);
+	} else
+		MSM_BUS_DBG("Enable bit not set. Skipping update request\n");
+
+	return ret;
+}
+
+/*
+ * Tear down the shell client's scale-client registration.
+ * NOTE(review): the 'cl' parameter is unused; the global clstate.cl is
+ * unregistered instead — all callers pass clstate.cl anyway.
+ */
+static void msm_bus_dbg_unregister_client(uint32_t cl)
+{
+	MSM_BUS_DBG("Unregistering shell client\n");
+	msm_bus_scale_unregister_client(clstate.cl);
+	clstate.cl = 0;
+}
+
+/*
+ * (Re-)register the shell test client.  If the user changed the master
+ * or slave since the last registration, the old client is unregistered
+ * first so the new route takes effect.  Returns the scale-client handle,
+ * the existing handle if already registered, or 0 when the enable bit
+ * is clear (registration deferred until enable is written).
+ */
+static uint32_t msm_bus_dbg_register_client(void)
+{
+	int ret = 0;
+
+	if (init_vectors[0].src != requested_vectors[0].src) {
+		MSM_BUS_DBG("Shell client master changed. Unregistering\n");
+		msm_bus_dbg_unregister_client(clstate.cl);
+	}
+	if (init_vectors[0].dst != requested_vectors[0].dst) {
+		MSM_BUS_DBG("Shell client slave changed. Unregistering\n");
+		msm_bus_dbg_unregister_client(clstate.cl);
+	}
+
+	/* Propagate the newly chosen route into all three usecases. */
+	current_vectors[0].src = init_vectors[0].src;
+	requested_vectors[0].src = init_vectors[0].src;
+	current_vectors[0].dst = init_vectors[0].dst;
+	requested_vectors[0].dst = init_vectors[0].dst;
+
+	if (!clstate.enable) {
+		MSM_BUS_DBG("Enable bit not set, skipping registration: cl %d\n"
+			, clstate.cl);
+		return 0;
+	}
+
+	if (clstate.cl) {
+		MSM_BUS_DBG("Client registered, skipping registration\n");
+		return clstate.cl;
+	}
+
+	MSM_BUS_DBG("Registering shell client\n");
+	ret = msm_bus_scale_register_client(&shell_client);
+	return ret;
+}
+
+/*
+ * debugfs attribute accessors for the shell test client.  All of them
+ * operate on the file-scope vectors/clstate and ignore the 'data'
+ * cookie.  Setting mas/slv triggers (re-)registration; setting en
+ * applies the current request.
+ */
+
+/* Read the currently selected master id. */
+static int msm_bus_dbg_mas_get(void *data, u64 *val)
+{
+	*val = init_vectors[0].src;
+	MSM_BUS_DBG("Get master: %llu\n", *val);
+	return 0;
+}
+
+/* Select a master and re-register the shell client for the new route. */
+static int msm_bus_dbg_mas_set(void *data, u64 val)
+{
+	init_vectors[0].src = val;
+	MSM_BUS_DBG("Set master: %llu\n", val);
+	clstate.cl = msm_bus_dbg_register_client();
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(shell_client_mas_fops, msm_bus_dbg_mas_get,
+			msm_bus_dbg_mas_set, "%llu\n");
+
+/* Read the currently selected slave id. */
+static int msm_bus_dbg_slv_get(void *data, u64 *val)
+{
+	*val = init_vectors[0].dst;
+	MSM_BUS_DBG("Get slave: %llu\n", *val);
+	return 0;
+}
+
+/* Select a slave and re-register the shell client for the new route. */
+static int msm_bus_dbg_slv_set(void *data, u64 val)
+{
+	init_vectors[0].dst = val;
+	MSM_BUS_DBG("Set slave: %llu\n", val);
+	clstate.cl = msm_bus_dbg_register_client();
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(shell_client_slv_fops, msm_bus_dbg_slv_get,
+			msm_bus_dbg_slv_set, "%llu\n");
+
+/* Read the pending arbitrated-bandwidth (ab) request. */
+static int msm_bus_dbg_ab_get(void *data, u64 *val)
+{
+	*val = requested_vectors[0].ab;
+	MSM_BUS_DBG("Get ab: %llu\n", *val);
+	return 0;
+}
+
+/* Stage an ab value; applied on the next write to the enable file. */
+static int msm_bus_dbg_ab_set(void *data, u64 val)
+{
+	requested_vectors[0].ab = val;
+	MSM_BUS_DBG("Set ab: %llu\n", val);
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(shell_client_ab_fops, msm_bus_dbg_ab_get,
+			msm_bus_dbg_ab_set, "%llu\n");
+
+/* Read the pending instantaneous-bandwidth (ib) request. */
+static int msm_bus_dbg_ib_get(void *data, u64 *val)
+{
+	*val = requested_vectors[0].ib;
+	MSM_BUS_DBG("Get ib: %llu\n", *val);
+	return 0;
+}
+
+/* Stage an ib value; applied on the next write to the enable file. */
+static int msm_bus_dbg_ib_set(void *data, u64 val)
+{
+	requested_vectors[0].ib = val;
+	MSM_BUS_DBG("Set ib: %llu\n", val);
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(shell_client_ib_fops, msm_bus_dbg_ib_get,
+			msm_bus_dbg_ib_set, "%llu\n");
+
+/* Read the enable flag. */
+static int msm_bus_dbg_en_get(void *data, u64 *val)
+{
+	*val = clstate.enable;
+	MSM_BUS_DBG("Get enable: %llu\n", *val);
+	return 0;
+}
+
+/* Enable the shell client (registering it if needed) and vote. */
+static int msm_bus_dbg_en_set(void *data, u64 val)
+{
+	int ret = 0;
+
+	clstate.enable = val;
+	if (clstate.enable) {
+		if (!clstate.cl) {
+			MSM_BUS_DBG("client: %u\n", clstate.cl);
+			clstate.cl = msm_bus_dbg_register_client();
+			if (clstate.cl)
+				ret = msm_bus_dbg_update_cl_request(clstate.cl);
+		} else {
+			MSM_BUS_DBG("update request for cl: %u\n", clstate.cl);
+			ret = msm_bus_dbg_update_cl_request(clstate.cl);
+		}
+	}
+
+	MSM_BUS_DBG("Set enable: %llu\n", val);
+	return ret;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(shell_client_en_fops, msm_bus_dbg_en_get,
+			msm_bus_dbg_en_set, "%llu\n");
+
+/**
+ * The following functions are used for viewing the client data
+ * and changing the client request at run-time
+ */
+
+/*
+ * Read handler for per-client debugfs files.  private_data is either a
+ * legacy client id (stored as an integer cast to a pointer by
+ * msm_bus_dbg_create()) or an adhoc client handle pointer (stored by
+ * debugfs_create_file() in the adhoc paths); both interpretations are
+ * tried against each list entry.  Returns 0 (EOF) when no entry matches.
+ */
+static ssize_t client_data_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	int bsize = 0;
+	uint32_t cl = (uint32_t)(uintptr_t)file->private_data;
+	struct msm_bus_cldata *cldata = NULL;
+	const struct msm_bus_client_handle *handle = file->private_data;
+	int found = 0;
+	ssize_t ret;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if ((cldata->clid == cl) ||
+			(cldata->handle && (cldata->handle == handle))) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found) {
+		rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+		return 0;
+	}
+
+	bsize = cldata->size;
+	ret = simple_read_from_buffer(buf, count, ppos,
+		cldata->buffer, bsize);
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	return ret;
+}
+
+static const struct file_operations client_data_fops = {
+	.open = simple_open,
+	.read = client_data_read,
+};
+
+/*
+ * Create a per-client debugfs file under @dent, stashing the legacy
+ * client id in private_data for client_data_read().  Returns NULL when
+ * the parent directory has not been created yet.
+ */
+struct dentry *msm_bus_dbg_create(const char *name, mode_t mode,
+	struct dentry *dent, uint32_t clid)
+{
+	void *priv = (void *)(uintptr_t)clid;
+
+	if (!dent) {
+		MSM_BUS_DBG("debugfs not ready yet\n");
+		return NULL;
+	}
+
+	return debugfs_create_file(name, mode, dent, priv, &client_data_fops);
+}
+
+/* Start tracking an adhoc client so debugfs can expose its votes. */
+int msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata)
+{
+	struct msm_bus_cldata *entry;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		MSM_BUS_DBG("Failed to allocate memory for client data\n");
+		return -ENOMEM;
+	}
+
+	entry->handle = pdata;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_add_tail(&entry->list, &cl_list);
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	return 0;
+}
+
+/*
+ * Append an adhoc client's (ab, ib) vote to its debugfs log buffer,
+ * creating the debugfs file lazily on first use.  The buffer wraps to
+ * offset 0 once fewer than FILL_LIMIT bytes remain.  Returns the new
+ * buffer length, -ENOENT if the handle is untracked, or -EINVAL if the
+ * client has no name to create a file with.
+ */
+int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+					u64 ab, u64 ib)
+{
+	struct msm_bus_cldata *cldata;
+	int i;
+	struct timespec ts;
+	bool found = false;
+	char *buf = NULL;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->handle == pdata) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+		return -ENOENT;
+	}
+
+	if (cldata->file == NULL) {
+		if (pdata->name == NULL) {
+			MSM_BUS_DBG("Client doesn't have a name\n");
+			rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+			return -EINVAL;
+		}
+		cldata->file = debugfs_create_file(pdata->name, 0444,
+				clients, (void *)pdata, &client_data_fops);
+	}
+
+	/* Wrap to the start when the remaining space drops below the limit. */
+	if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT))
+		i = cldata->size;
+	else {
+		i = 0;
+		cldata->size = 0;
+	}
+	buf = cldata->buffer;
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%ld.%09lu\n",
+		ts.tv_sec, ts.tv_nsec);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "master: ");
+
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ", pdata->mas);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslave : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ", pdata->slv);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ", ab);
+
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ", ib);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+	cldata->size = i;
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	/* Trace outside the lock; pdata fields are owned by the caller. */
+	trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec,
+		pdata->name, pdata->mas, pdata->slv, ab, ib);
+
+	return i;
+}
+
+/* Stop tracking an adhoc client: drop its debugfs file and list entry. */
+void msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata)
+{
+	struct msm_bus_cldata *entry, *match = NULL;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(entry, &cl_list, list) {
+		if (entry->handle == pdata) {
+			match = entry;
+			break;
+		}
+	}
+	if (match) {
+		debugfs_remove(match->file);
+		list_del(&match->list);
+		kfree(match);
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+}
+
+/*
+ * Start tracking a legacy (pdata-based) client in the debugfs list.
+ *
+ * kzalloc (not kmalloc) is required: the ->handle and ->buffer fields
+ * are not assigned here, yet ->handle is compared against live adhoc
+ * handles in client_data_read() and the msm_bus_debugfs_init() scan, so
+ * it must start out NULL rather than hold uninitialized heap garbage.
+ */
+static int msm_bus_dbg_record_client(const struct msm_bus_scale_pdata *pdata,
+	int index, uint32_t clid, struct dentry *file)
+{
+	struct msm_bus_cldata *cldata;
+
+	cldata = kzalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL);
+	if (!cldata) {
+		MSM_BUS_DBG("Failed to allocate memory for client data\n");
+		return -ENOMEM;
+	}
+	cldata->pdata = pdata;
+	cldata->index = index;
+	cldata->clid = clid;
+	cldata->file = file;
+	cldata->size = 0;
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_add_tail(&cldata->list, &cl_list);
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+	return 0;
+}
+
+/* Drop the debugfs record for a legacy client identified by @clid. */
+static void msm_bus_dbg_free_client(uint32_t clid)
+{
+	struct msm_bus_cldata *entry, *match = NULL;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(entry, &cl_list, list) {
+		if (entry->clid == clid) {
+			match = entry;
+			break;
+		}
+	}
+	if (match) {
+		debugfs_remove(match->file);
+		list_del(&match->list);
+		kfree(match);
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+}
+
+/*
+ * Append a legacy client's vote (all paths of usecase @index) to its
+ * debugfs log buffer, creating the file lazily, and emit trace events.
+ * The buffer wraps once fewer than FILL_LIMIT bytes remain.  Returns
+ * the new buffer length, -ENOENT for an unknown clid, or -EINVAL if the
+ * client is unnamed.
+ */
+static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata,
+	int index, uint32_t clid)
+{
+	int i = 0, j;
+	char *buf = NULL;
+	struct msm_bus_cldata *cldata = NULL;
+	struct timespec ts;
+	int found = 0;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->clid == clid) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found) {
+		rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+		return -ENOENT;
+	}
+
+	if (cldata->file == NULL) {
+		if (pdata->name == NULL) {
+			rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+			MSM_BUS_DBG("Client doesn't have a name\n");
+			return -EINVAL;
+		}
+		cldata->file = msm_bus_dbg_create(pdata->name, 0444,
+			clients, clid);
+	}
+
+	/* Wrap to the start when the remaining space drops below the limit. */
+	if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT))
+		i = cldata->size;
+	else {
+		i = 0;
+		cldata->size = 0;
+	}
+	buf = cldata->buffer;
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%ld.%09lu\n",
+		ts.tv_sec, ts.tv_nsec);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "curr   : %d\n", index);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "masters: ");
+
+	/*
+	 * NOTE(review): path count is taken from usecase[0] while vectors
+	 * come from usecase[index] — assumes every usecase has the same
+	 * num_paths; confirm against the pdata contract.
+	 */
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ",
+			pdata->usecase[index].vectors[j].src);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslaves : ");
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ",
+			pdata->usecase[index].vectors[j].dst);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab     : ");
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ",
+			pdata->usecase[index].vectors[j].ab);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib     : ");
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ",
+			pdata->usecase[index].vectors[j].ib);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec,
+			pdata->name,
+			pdata->usecase[index].vectors[j].src,
+			pdata->usecase[index].vectors[j].dst,
+			pdata->usecase[index].vectors[j].ab,
+			pdata->usecase[index].vectors[j].ib);
+
+	cldata->index = index;
+	cldata->size = i;
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	return i;
+}
+
+/*
+ * debugfs write handler for "update-request".  Input format:
+ * "<client-name> <usecase-index>".  Looks up the named legacy client
+ * and applies the requested usecase vote.
+ *
+ * Fixes vs. previous revision: the debug log printed sizeof(ubuf)
+ * (pointer size) instead of the write length, and the index bound check
+ * used '>' where '>=' is required (valid indices are
+ * 0..num_usecases-1).
+ */
+static ssize_t msm_bus_dbg_update_request_write(struct file *file,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct msm_bus_cldata *cldata;
+	unsigned long index = 0;
+	int ret = 0;
+	char *chid;
+	char *buf = kmalloc(cnt + 1, GFP_KERNEL);
+	int found = 0;
+	uint32_t clid;
+	ssize_t res = cnt;
+
+	if (!buf)
+		return -ENOMEM;
+
+	if (cnt == 0) {
+		res = 0;
+		goto out;
+	}
+	if (copy_from_user(buf, ubuf, cnt)) {
+		res = -EFAULT;
+		goto out;
+	}
+	buf[cnt] = '\0';
+	chid = buf;
+	MSM_BUS_DBG("buffer: %s\n size: %zu\n", buf, cnt);
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (strnstr(chid, cldata->pdata->name, cnt)) {
+			found = 1;
+			strsep(&chid, " ");
+			if (chid) {
+				ret = kstrtoul(chid, 10, &index);
+				if (ret) {
+					MSM_BUS_DBG("Index conversion\n"
+						" failed\n");
+					rt_mutex_unlock(
+						&msm_bus_dbg_cllist_lock);
+					res = -EFAULT;
+					goto out;
+				}
+			} else {
+				MSM_BUS_DBG("Error parsing input. Index not\n"
+					" found\n");
+				found = 0;
+			}
+			if (index >= cldata->pdata->num_usecases) {
+				MSM_BUS_DBG("Invalid index!\n");
+				rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+				res = -EINVAL;
+				goto out;
+			}
+			clid = cldata->clid;
+			break;
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	if (found)
+		msm_bus_scale_client_update_request(clid, index);
+
+out:
+	kfree(buf);
+	return res;
+}
+
+/**
+ * The following functions are used for viewing the commit data
+ * for each fabric
+ */
+/*
+ * Read handler for per-fabric commit-data files; private_data holds the
+ * fabric name used to look up its record.  Returns -ENOENT for an
+ * unknown fabric.
+ */
+static ssize_t fabric_data_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	const char *name = file->private_data;
+	struct msm_bus_fab_list *pos, *match = NULL;
+	ssize_t ret;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(pos, &fabdata_list, list) {
+		if (!strcmp(pos->name, name)) {
+			match = pos;
+			break;
+		}
+	}
+	if (!match) {
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -ENOENT;
+	}
+
+	ret = simple_read_from_buffer(buf, count, ppos,
+		match->buffer, match->size);
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return ret;
+}
+
+static const struct file_operations fabric_data_fops = {
+	.open = simple_open,
+	.read = fabric_data_read,
+};
+
+/*
+ * Read handler for "print_rules": regenerates the rules text into the
+ * preallocated rules_buf on every read.
+ * NOTE(review): the read length is MAX_BUFF_SIZE, so shorter rule text
+ * is followed by trailing NUL bytes; using the length returned by
+ * print_rules_buf() would avoid that — confirm its return semantics.
+ */
+static ssize_t rules_dbg_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	ssize_t ret;
+
+	memset(rules_buf, 0, MAX_BUFF_SIZE);
+	print_rules_buf(rules_buf, MAX_BUFF_SIZE);
+	ret = simple_read_from_buffer(buf, count, ppos,
+		rules_buf, MAX_BUFF_SIZE);
+	return ret;
+}
+
+/*
+ * NOTE(review): rules_dbg_open is never referenced — rules_dbg_fops uses
+ * simple_open below — so this is dead code (and a -Wunused-function
+ * candidate); consider removing it.
+ */
+static int rules_dbg_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations rules_dbg_fops = {
+	.open = simple_open,
+	.read = rules_dbg_read,
+};
+
+/*
+ * Add a fabric to the commit-data list.
+ *
+ * kzalloc (not kmalloc) is required: ->file is not assigned until
+ * msm_bus_debugfs_init() runs, but it is NULL-checked in
+ * msm_bus_dbg_fill_fab_buffer() and handed to debugfs_remove() in
+ * msm_bus_dbg_free_fabric(), so it must not contain uninitialized heap
+ * garbage.  The @file argument is currently unused by all callers.
+ */
+static int msm_bus_dbg_record_fabric(const char *fabname, struct dentry *file)
+{
+	struct msm_bus_fab_list *fablist;
+	int ret = 0;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	fablist = kzalloc(sizeof(struct msm_bus_fab_list), GFP_KERNEL);
+	if (!fablist) {
+		MSM_BUS_DBG("Failed to allocate memory for commit data\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	fablist->name = fabname;
+	fablist->size = 0;
+	list_add_tail(&fablist->list, &fabdata_list);
+err:
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return ret;
+}
+
+/* Remove a fabric's commit-data record and its debugfs file, if any. */
+static void msm_bus_dbg_free_fabric(const char *fabname)
+{
+	struct msm_bus_fab_list *pos, *match = NULL;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(pos, &fabdata_list, list) {
+		if (!strcmp(pos->name, fabname)) {
+			match = pos;
+			break;
+		}
+	}
+	if (match) {
+		debugfs_remove(match->file);
+		list_del(&match->list);
+		kfree(match);
+	}
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+}
+
+/*
+ * Append a timestamped commit snapshot for @fabname to its log buffer
+ * via msm_bus_rpm_fill_cdata_buffer().  The buffer wraps when fewer
+ * than 256 bytes remain.  Returns 0, -ENOENT for an unknown fabric, or
+ * -EFAULT if its debugfs file was never created.
+ */
+static int msm_bus_dbg_fill_fab_buffer(const char *fabname,
+	void *cdata, int nmasters, int nslaves,
+	int ntslaves)
+{
+	int i;
+	char *buf = NULL;
+	struct msm_bus_fab_list *fablist = NULL;
+	struct timespec ts;
+	int found = 0;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		if (strcmp(fablist->name, fabname) == 0) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -ENOENT;
+	}
+
+	if (fablist->file == NULL) {
+		MSM_BUS_DBG("Fabric dbg entry does not exist\n");
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -EFAULT;
+	}
+
+	if (fablist->size < MAX_BUFF_SIZE - 256)
+		i = fablist->size;
+	else {
+		i = 0;
+		fablist->size = 0;
+	}
+	buf = fablist->buffer;
+	/*
+	 * NOTE(review): the lock is dropped while formatting into buf and
+	 * re-taken only to store the size — a concurrent fill/free for the
+	 * same fabric could race here; confirm callers serialize per fabric.
+	 */
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%ld.%09lu\n",
+		ts.tv_sec, ts.tv_nsec);
+
+	msm_bus_rpm_fill_cdata_buffer(&i, buf, MAX_BUFF_SIZE, cdata,
+		nmasters, nslaves, ntslaves);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	fablist->size = i;
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return 0;
+}
+
+/* fops for the write-only "update-request" control file. */
+static const struct file_operations msm_bus_dbg_update_request_fops = {
+	.open = simple_open,
+	.write = msm_bus_dbg_update_request_write,
+};
+
+/*
+ * Read handler for "dump_clients": emits every legacy client's current
+ * vote as bus_client_status trace events and returns a short status
+ * message to the reader.  (Fixes user-visible typo "curent".)
+ */
+static ssize_t msm_bus_dbg_dump_clients_read(struct file *file,
+	char __user *buf, size_t count, loff_t *ppos)
+{
+	int j, cnt;
+	char msg[50];
+	struct msm_bus_cldata *cldata = NULL;
+
+	cnt = scnprintf(msg, 50,
+		"\nDumping current client votes to trace log\n");
+	/* Only dump once per open; later reads just drain the message. */
+	if (*ppos)
+		goto exit_dump_clients_read;
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (IS_ERR_OR_NULL(cldata->pdata))
+			continue;
+		for (j = 0; j < cldata->pdata->usecase->num_paths; j++) {
+			if (cldata->index == -1)
+				continue;
+			trace_bus_client_status(
+				cldata->pdata->name,
+				cldata->pdata->usecase[cldata->index].vectors[j].src,
+				cldata->pdata->usecase[cldata->index].vectors[j].dst,
+				cldata->pdata->usecase[cldata->index].vectors[j].ab,
+				cldata->pdata->usecase[cldata->index].vectors[j].ib,
+				cldata->pdata->active_only);
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+exit_dump_clients_read:
+	return simple_read_from_buffer(buf, count, ppos, msg, cnt);
+}
+
+static const struct file_operations msm_bus_dbg_dump_clients_fops = {
+	.open = simple_open,
+	.read = msm_bus_dbg_dump_clients_read,
+};
+
+/**
+ * msm_bus_dbg_client_data() - Add debug data for clients
+ * @pdata: Platform data of the client
+ * @index: The current index or operation to be performed
+ *         (MSM_BUS_DBG_REGISTER / MSM_BUS_DBG_UNREGISTER / usecase index)
+ * @clid: Client handle obtained during registration
+ */
+void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index,
+	uint32_t clid)
+{
+	/* ->file stays NULL here; created lazily on the first buffer fill. */
+	struct dentry *file = NULL;
+
+	if (index == MSM_BUS_DBG_REGISTER) {
+		msm_bus_dbg_record_client(pdata, index, clid, file);
+		if (!pdata->name) {
+			MSM_BUS_DBG("Cannot create debugfs entry. Null name\n");
+			return;
+		}
+	} else if (index == MSM_BUS_DBG_UNREGISTER) {
+		msm_bus_dbg_free_client(clid);
+		MSM_BUS_DBG("Client %d unregistered\n", clid);
+	} else
+		msm_bus_dbg_fill_cl_buffer(pdata, index, clid);
+}
+EXPORT_SYMBOL(msm_bus_dbg_client_data);
+
+/**
+ * msm_bus_dbg_commit_data() - Add commit data from fabrics
+ * @fabname: Fabric name specified in platform data
+ * @cdata: Commit Data
+ * @nmasters: Number of masters attached to fabric
+ * @nslaves: Number of slaves attached to fabric
+ * @ntslaves: Number of tiered slaves attached to fabric
+ * @op: Operation to be performed
+ *      (MSM_BUS_DBG_REGISTER / MSM_BUS_DBG_UNREGISTER / record commit)
+ */
+void msm_bus_dbg_commit_data(const char *fabname, void *cdata,
+	int nmasters, int nslaves, int ntslaves, int op)
+{
+	/* Placeholder dentry; the fabric's file is created at init time. */
+	struct dentry *file = NULL;
+
+	if (op == MSM_BUS_DBG_REGISTER)
+		msm_bus_dbg_record_fabric(fabname, file);
+	else if (op == MSM_BUS_DBG_UNREGISTER)
+		msm_bus_dbg_free_fabric(fabname);
+	else
+		msm_bus_dbg_fill_fab_buffer(fabname, cdata, nmasters,
+			nslaves, ntslaves);
+}
+EXPORT_SYMBOL(msm_bus_dbg_commit_data);
+
+/*
+ * Create the msm-bus-dbg debugfs tree and the files for all clients and
+ * fabrics recorded before this initcall ran.
+ *
+ * Fixes vs. previous revision: the error checks after creating the
+ * "client-data", "shell-client" and "commit-data" directories re-tested
+ * the already-validated 'dir' instead of the dentry just returned
+ * (copy-paste bug), and debugfs files were given the address of the
+ * local 'val' as private data — dangling once this __init function
+ * returns.  None of the handlers use that cookie, so pass NULL.
+ */
+static int __init msm_bus_debugfs_init(void)
+{
+	struct dentry *commit, *shell_client, *rules_dbg;
+	struct msm_bus_fab_list *fablist;
+	struct msm_bus_cldata *cldata = NULL;
+
+	dir = debugfs_create_dir("msm-bus-dbg", NULL);
+	if ((!dir) || IS_ERR(dir)) {
+		MSM_BUS_ERR("Couldn't create msm-bus-dbg\n");
+		goto err;
+	}
+
+	clients = debugfs_create_dir("client-data", dir);
+	if ((!clients) || IS_ERR(clients)) {
+		MSM_BUS_ERR("Couldn't create clients\n");
+		goto err;
+	}
+
+	shell_client = debugfs_create_dir("shell-client", dir);
+	if ((!shell_client) || IS_ERR(shell_client)) {
+		MSM_BUS_ERR("Couldn't create clients\n");
+		goto err;
+	}
+
+	commit = debugfs_create_dir("commit-data", dir);
+	if ((!commit) || IS_ERR(commit)) {
+		MSM_BUS_ERR("Couldn't create commit\n");
+		goto err;
+	}
+
+	rules_dbg = debugfs_create_dir("rules-dbg", dir);
+	if ((!rules_dbg) || IS_ERR(rules_dbg)) {
+		MSM_BUS_ERR("Couldn't create rules-dbg\n");
+		goto err;
+	}
+
+	if (debugfs_create_file("print_rules", 0644,
+		rules_dbg, NULL, &rules_dbg_fops) == NULL)
+		goto err;
+
+	if (debugfs_create_file("update_request", 0644,
+		shell_client, NULL, &shell_client_en_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("ib", 0644, shell_client, NULL,
+		&shell_client_ib_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("ab", 0644, shell_client, NULL,
+		&shell_client_ab_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("slv", 0644, shell_client,
+		NULL, &shell_client_slv_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("mas", 0644, shell_client,
+		NULL, &shell_client_mas_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("update-request", 0644,
+		clients, NULL, &msm_bus_dbg_update_request_fops) == NULL)
+		goto err;
+
+	rules_buf = kzalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+	if (!rules_buf) {
+		MSM_BUS_ERR("Failed to alloc rules_buf");
+		goto err;
+	}
+
+	/* Create files for clients registered before debugfs was ready. */
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->pdata) {
+			if (cldata->pdata->name == NULL) {
+				MSM_BUS_DBG("Client name not found\n");
+				continue;
+			}
+			cldata->file = msm_bus_dbg_create(cldata->pdata->name,
+				0444, clients, cldata->clid);
+		} else if (cldata->handle) {
+			if (cldata->handle->name == NULL) {
+				MSM_BUS_DBG("Client doesn't have a name\n");
+				continue;
+			}
+			cldata->file = debugfs_create_file(cldata->handle->name,
+				0444, clients,
+				(void *)cldata->handle,
+				&client_data_fops);
+		}
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	if (debugfs_create_file("dump_clients", 0644,
+		clients, NULL, &msm_bus_dbg_dump_clients_fops) == NULL)
+		goto err;
+
+	/* Create commit-data files for fabrics registered before init. */
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		fablist->file = debugfs_create_file(fablist->name, 0444,
+			commit, (void *)fablist->name, &fabric_data_fops);
+		if (fablist->file == NULL) {
+			MSM_BUS_DBG("Cannot create files for commit data\n");
+			kfree(rules_buf);
+			mutex_unlock(&msm_bus_dbg_fablist_lock);
+			goto err;
+		}
+	}
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+
+	msm_bus_dbg_init_vectors();
+	return 0;
+err:
+	debugfs_remove_recursive(dir);
+	return -ENODEV;
+}
+late_initcall(msm_bus_debugfs_init);
+
+/*
+ * Module exit: tear down the whole debugfs tree, then free every
+ * tracked client and fabric record plus the rules buffer.
+ */
+static void __exit msm_bus_dbg_teardown(void)
+{
+	struct msm_bus_fab_list *fablist = NULL, *fablist_temp;
+	struct msm_bus_cldata *cldata = NULL, *cldata_temp;
+
+	/* Removes all per-client/per-fabric files in one pass. */
+	debugfs_remove_recursive(dir);
+
+	rt_mutex_lock(&msm_bus_dbg_cllist_lock);
+	list_for_each_entry_safe(cldata, cldata_temp, &cl_list, list) {
+		list_del(&cldata->list);
+		kfree(cldata);
+	}
+	rt_mutex_unlock(&msm_bus_dbg_cllist_lock);
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry_safe(fablist, fablist_temp, &fabdata_list, list) {
+		list_del(&fablist->list);
+		kfree(fablist);
+	}
+	kfree(rules_buf);
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+}
+module_exit(msm_bus_dbg_teardown);
+MODULE_DESCRIPTION("Debugfs for msm bus scaling client");
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
new file mode 100644
index 0000000..f7513b0
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
@@ -0,0 +1,1255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2016, 2018, Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <soc/qcom/rpm-smd.h>
+#include <trace/events/trace_msm_bus.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_bimc.h"
+
+static int msm_bus_dev_init_qos(struct device *dev, void *data);
+
+/*
+ * sysfs "bw" show: dump per-lnode and aggregated bandwidth votes for a
+ * bus node device.
+ *
+ * Fix: every scnprintf after the first must be bounded by
+ * (PAGE_SIZE - off), not PAGE_SIZE — sysfs hands us exactly one page,
+ * and the old bound allowed writing past it once 'off' grew.
+ */
+ssize_t bw_show(struct device *dev, struct device_attribute *attr,
+	char *buf)
+{
+	struct msm_bus_node_info_type *node_info = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int i;
+	int off = 0;
+
+	bus_node = to_msm_bus_node(dev);
+	if (!bus_node)
+		return -EINVAL;
+
+	node_info = bus_node->node_info;
+
+	for (i = 0; i < bus_node->num_lnodes; i++) {
+		if (!bus_node->lnode_list[i].in_use)
+			continue;
+		off += scnprintf((buf + off), PAGE_SIZE - off,
+			"[%d]:%s:Act_IB %llu Act_AB %llu Slp_IB %llu Slp_AB %llu\n",
+			i, bus_node->lnode_list[i].cl_name,
+			bus_node->lnode_list[i].lnode_ib[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ab[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ib[DUAL_CTX],
+			bus_node->lnode_list[i].lnode_ab[DUAL_CTX]);
+	}
+	off += scnprintf((buf + off), PAGE_SIZE - off,
+		"Max_Act_IB %llu Sum_Act_AB %llu Act_Util_fact %d Act_Vrail_comp %d\n",
+		bus_node->node_bw[ACTIVE_CTX].max_ib,
+		bus_node->node_bw[ACTIVE_CTX].sum_ab,
+		bus_node->node_bw[ACTIVE_CTX].util_used,
+		bus_node->node_bw[ACTIVE_CTX].vrail_used);
+	off += scnprintf((buf + off), PAGE_SIZE - off,
+		"Max_Slp_IB %llu Sum_Slp_AB %llu Slp_Util_fact %d Slp_Vrail_comp %d\n",
+		bus_node->node_bw[DUAL_CTX].max_ib,
+		bus_node->node_bw[DUAL_CTX].sum_ab,
+		bus_node->node_bw[DUAL_CTX].util_used,
+		bus_node->node_bw[DUAL_CTX].vrail_used);
+	return off;
+}
+
+/* sysfs "bw" store: the attribute is effectively read-only; writes are
+ * accepted (to keep the RW attribute) but ignored.
+ */
+ssize_t bw_store(struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	return count;
+}
+
+DEVICE_ATTR_RW(bw);
+
+/* Bus rules supplied statically (from DT) rather than registered at
+ * run time.
+ */
+struct static_rules_type {
+	int num_rules;			/* entries in rules[] */
+	struct bus_rule_type *rules;	/* rule array, owned elsewhere */
+};
+
+static struct static_rules_type static_rules;
+
+/*
+ * Look up the (devm-managed) regulator named by nclk->reg_name for a bus
+ * node.  A node with an empty reg_name simply has no regulator — that is
+ * not an error.  Returns 0 on success or if already obtained/not needed.
+ */
+static int bus_get_reg(struct nodeclk *nclk, struct device *dev)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *node_dev;
+
+	if (!(dev && nclk))
+		return -ENXIO;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!strlen(nclk->reg_name)) {
+		dev_dbg(dev, "No regulator exist for node %d\n",
+			node_dev->node_info->id);
+		goto exit_of_get_reg;
+	} else {
+		/* Already resolved on an earlier call. */
+		if (!(IS_ERR_OR_NULL(nclk->reg)))
+			goto exit_of_get_reg;
+
+		nclk->reg = devm_regulator_get(dev, nclk->reg_name);
+		if (IS_ERR_OR_NULL(nclk->reg)) {
+			ret =
+			(IS_ERR(nclk->reg) ? PTR_ERR(nclk->reg) : -ENXIO);
+			dev_err(dev, "Error: Failed to get regulator %s:%d\n",
+				nclk->reg_name, ret);
+		} else {
+			dev_dbg(dev, "Successfully got regulator for %d\n",
+				node_dev->node_info->id);
+		}
+	}
+
+exit_of_get_reg:
+	return ret;
+}
+
+/* Enable a node's regulator; -ENXIO when there is none to enable. */
+static int bus_enable_reg(struct nodeclk *nclk)
+{
+	int err;
+
+	if (!nclk)
+		return -ENXIO;
+	if (IS_ERR_OR_NULL(nclk->reg))
+		return -ENXIO;
+
+	err = regulator_enable(nclk->reg);
+	if (err) {
+		MSM_BUS_ERR("Failed to enable regulator for %s\n",
+			nclk->reg_name);
+		return err;
+	}
+
+	pr_debug("%s: Enabled Reg\n", __func__);
+	return 0;
+}
+
+/* Disable a node's regulator; -ENXIO when there is none to disable. */
+static int bus_disable_reg(struct nodeclk *nclk)
+{
+	if (!nclk)
+		return -ENXIO;
+	if (IS_ERR_OR_NULL(nclk->reg))
+		return -ENXIO;
+
+	regulator_disable(nclk->reg);
+	pr_debug("%s: Disabled Reg\n", __func__);
+	return 0;
+}
+
+/*
+ * Prepare+enable a node clock, first obtaining and enabling its backing
+ * regulator when one is named and @dev is provided.  No-op for clocks
+ * already enabled or marked setrate-only.
+ */
+static int enable_nodeclk(struct nodeclk *nclk, struct device *dev)
+{
+	int ret = 0;
+
+	if (!nclk->enable && !nclk->setrate_only_clk) {
+		if (dev && strlen(nclk->reg_name)) {
+			/* Lazily resolve the regulator on first enable. */
+			if (IS_ERR_OR_NULL(nclk->reg)) {
+				ret = bus_get_reg(nclk, dev);
+				if (ret) {
+					dev_dbg(dev,
+						"Failed to get reg.Err %d\n",
+						ret);
+					goto exit_enable_nodeclk;
+				}
+			}
+
+			ret = bus_enable_reg(nclk);
+			if (ret) {
+				dev_dbg(dev, "Failed to enable reg. Err %d\n",
+					ret);
+				goto exit_enable_nodeclk;
+			}
+		}
+		ret = clk_prepare_enable(nclk->clk);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: failed to enable clk ", __func__);
+			nclk->enable = false;
+		} else
+			nclk->enable = true;
+	}
+exit_enable_nodeclk:
+	return ret;
+}
+
+/* Disable+unprepare a node clock and drop its regulator vote. */
+static int disable_nodeclk(struct nodeclk *nclk)
+{
+	if (nclk->setrate_only_clk || !nclk->enable)
+		return 0;
+
+	clk_disable_unprepare(nclk->clk);
+	nclk->enable = false;
+	bus_disable_reg(nclk);
+	return 0;
+}
+
+/* Set a node clock's rate, unless the clock is marked enable-only. */
+static int setrate_nodeclk(struct nodeclk *nclk, long rate)
+{
+	int err = 0;
+
+	if (!nclk->enable_only_clk) {
+		err = clk_set_rate(nclk->clk, rate);
+		if (err)
+			MSM_BUS_ERR("%s: failed to setrate clk", __func__);
+	}
+
+	return err;
+}
+
+/*
+ * Send the node's aggregated bandwidth (sum_ab) for @ctx to the RPM,
+ * once for its master id and once for its slave id (ids of -1 mean the
+ * node has no request of that kind).  DUAL_CTX maps to the RPM sleep
+ * set; anything else to the active set.
+ */
+static int send_rpm_msg(struct msm_bus_node_device_type *ndev, int ctx)
+{
+	int ret = 0;
+	int rsc_type;
+	struct msm_rpm_kvp rpm_kvp;
+	int rpm_ctx;
+
+	if (!ndev) {
+		MSM_BUS_ERR("%s: Error getting node info.", __func__);
+		ret = -ENODEV;
+		goto exit_send_rpm_msg;
+	}
+
+	rpm_kvp.length = sizeof(uint64_t);
+	rpm_kvp.key = RPM_MASTER_FIELD_BW;
+
+	if (ctx == DUAL_CTX)
+		rpm_ctx = MSM_RPM_CTX_SLEEP_SET;
+	else
+		rpm_ctx = MSM_RPM_CTX_ACTIVE_SET;
+
+	/* KVP payload points directly at the node's aggregated ab value. */
+	rpm_kvp.data = (uint8_t *)&ndev->node_bw[ctx].sum_ab;
+
+	if (ndev->node_info->mas_rpm_id != -1) {
+		rsc_type = RPM_BUS_MASTER_REQ;
+		ret = msm_rpm_send_message(rpm_ctx, rsc_type,
+			ndev->node_info->mas_rpm_id, &rpm_kvp, 1);
+		if (ret) {
+			MSM_BUS_ERR("%s: Failed to send RPM message:",
+					__func__);
+			MSM_BUS_ERR("%s:Node Id %d RPM id %d",
+			__func__, ndev->node_info->id,
+				ndev->node_info->mas_rpm_id);
+			goto exit_send_rpm_msg;
+		}
+		trace_bus_agg_bw(ndev->node_info->id,
+			ndev->node_info->mas_rpm_id, rpm_ctx,
+			ndev->node_bw[ctx].sum_ab);
+	}
+
+	if (ndev->node_info->slv_rpm_id != -1) {
+		rsc_type = RPM_BUS_SLAVE_REQ;
+		ret = msm_rpm_send_message(rpm_ctx, rsc_type,
+			ndev->node_info->slv_rpm_id, &rpm_kvp, 1);
+		if (ret) {
+			MSM_BUS_ERR("%s: Failed to send RPM message:",
+						__func__);
+			MSM_BUS_ERR("%s: Node Id %d RPM id %d",
+				__func__, ndev->node_info->id,
+				ndev->node_info->slv_rpm_id);
+			goto exit_send_rpm_msg;
+		}
+		trace_bus_agg_bw(ndev->node_info->id,
+			ndev->node_info->slv_rpm_id, rpm_ctx,
+			ndev->node_bw[ctx].sum_ab);
+	}
+exit_send_rpm_msg:
+	return ret;
+}
+
/*
 * flush_bw_data() - Commit a node's aggregated bandwidth if it changed.
 * @node_info: node to flush.
 * @ctx:       bandwidth context index (ACTIVE_CTX / DUAL_CTX).
 *
 * AP-owned ports are programmed directly through the fabric's NoC QoS ops
 * (active context only); other nodes are voted to the RPM. The last
 * committed value is cached so unchanged aggregates are skipped.
 */
static int flush_bw_data(struct msm_bus_node_device_type *node_info, int ctx)
{
	int ret = 0;

	if (!node_info) {
		MSM_BUS_ERR("%s: Unable to find bus device for device",
			__func__);
		ret = -ENODEV;
		goto exit_flush_bw_data;
	}

	/* Only commit when the aggregate has actually moved. */
	if (node_info->node_bw[ctx].last_sum_ab !=
		node_info->node_bw[ctx].sum_ab) {
		if (node_info->ap_owned) {
			struct msm_bus_node_device_type *bus_device =
			to_msm_bus_node(node_info->node_info->bus_device);
			struct msm_bus_fab_device_type *fabdev =
				bus_device->fabdev;

			/*
			 * For AP owned ports, only care about the Active
			 * context bandwidth.
			 */
			if (fabdev && (ctx == ACTIVE_CTX) &&
				fabdev->noc_ops.update_bw_reg &&
				fabdev->noc_ops.update_bw_reg
					(node_info->node_info->qos_params.mode))
				ret = fabdev->noc_ops.set_bw(node_info,
						fabdev->qos_base,
						fabdev->base_offset,
						fabdev->qos_off,
						fabdev->qos_freq);
		} else {
			ret = send_rpm_msg(node_info, ctx);

			if (ret)
				MSM_BUS_ERR("%s: Failed to send RPM msg for%d",
				__func__, node_info->node_info->id);
		}
		/* Cache the committed value to short-circuit the next flush. */
		node_info->node_bw[ctx].last_sum_ab =
			node_info->node_bw[ctx].sum_ab;
	}

exit_flush_bw_data:
	return ret;

}
+
/*
 * flush_clk_data() - Push a node's aggregated clock request to its clock.
 * @node: node whose per-context clock should be updated.
 * @ctx:  clock context index (< NUM_CTX).
 *
 * Rounds the requested rate through the clock framework, applies it, and
 * enables or disables the node clock (plus the fabric QoS clock for fabric
 * devices) depending on whether the new rate is non-zero. On exit, a fabric
 * device's aggregated rate is reset so it can be re-aggregated on the next
 * commit, and the clock's dirty flag is cleared.
 */
static int flush_clk_data(struct msm_bus_node_device_type *node, int ctx)
{
	struct nodeclk *nodeclk = NULL;
	int ret = 0;

	if (!node) {
		MSM_BUS_ERR("Unable to find bus device");
		ret = -ENODEV;
		goto exit_flush_clk_data;
	}

	nodeclk = &node->clk[ctx];

	/* Nothing to do for nodes without a valid clock handle. */
	if (IS_ERR_OR_NULL(nodeclk) || IS_ERR_OR_NULL(nodeclk->clk))
		goto exit_flush_clk_data;

	if (nodeclk->rate != node->node_bw[ctx].cur_clk_hz) {
		long rounded_rate;

		nodeclk->rate = node->node_bw[ctx].cur_clk_hz;
		nodeclk->dirty = true;

		if (nodeclk->rate) {
			/* Non-zero request: set the rate, then enable. */
			rounded_rate = clk_round_rate(nodeclk->clk,
				nodeclk->rate);
			ret = setrate_nodeclk(nodeclk, rounded_rate);

			if (ret) {
				MSM_BUS_ERR("%s: Failed to set_rate %lu for %d",
					__func__, rounded_rate,
						node->node_info->id);
				ret = -ENODEV;
				goto exit_flush_clk_data;
			}

			ret = enable_nodeclk(nodeclk, &node->dev);

			if ((node->node_info->is_fab_dev) &&
				!IS_ERR_OR_NULL(node->bus_qos_clk.clk))
				ret = enable_nodeclk(&node->bus_qos_clk,
							&node->dev);
		} else {
			/* Zero request: gate the QoS clock, then the node. */
			if ((node->node_info->is_fab_dev) &&
				!IS_ERR_OR_NULL(node->bus_qos_clk.clk))
				ret = disable_nodeclk(&node->bus_qos_clk);

			ret = disable_nodeclk(nodeclk);
		}

		if (ret) {
			MSM_BUS_ERR("%s: Failed to enable for %d", __func__,
				node->node_info->id);
			ret = -ENODEV;
			goto exit_flush_clk_data;
		}
		MSM_BUS_DBG("%s: Updated %d clk to %llu", __func__,
				node->node_info->id, nodeclk->rate);
	}
exit_flush_clk_data:
	/* Reset the aggregated clock rate for fab devices*/
	if (node && node->node_info->is_fab_dev)
		node->node_bw[ctx].cur_clk_hz = 0;

	if (nodeclk)
		nodeclk->dirty = false;
	return ret;
}
+
+static int msm_bus_agg_fab_clks(struct msm_bus_node_device_type *bus_dev)
+{
+ int ret = 0;
+ struct msm_bus_node_device_type *node;
+ int ctx;
+
+ list_for_each_entry(node, &bus_dev->devlist, dev_link) {
+ for (ctx = 0; ctx < NUM_CTX; ctx++) {
+ if (node->node_bw[ctx].cur_clk_hz >=
+ bus_dev->node_bw[ctx].cur_clk_hz)
+ bus_dev->node_bw[ctx].cur_clk_hz =
+ node->node_bw[ctx].cur_clk_hz;
+ }
+ }
+ return ret;
+}
+
/*
 * msm_bus_commit_data() - Flush all pending clock and bandwidth votes.
 * @clist: list of dirty nodes queued by the arbitration layer.
 *
 * First aggregates child clock requests into each fabric device, then, for
 * every queued node and every context, flushes clock state followed by
 * bandwidth state, retrying any QoS programming that was deferred at probe.
 * Nodes are unlinked from the commit list as they are processed. Returns
 * the status of the last flush performed.
 */
int msm_bus_commit_data(struct list_head *clist)
{
	int ret = 0;
	int ctx;
	struct msm_bus_node_device_type *node;
	struct msm_bus_node_device_type *node_tmp;

	list_for_each_entry(node, clist, link) {
		/* Aggregate the bus clocks */
		if (node->node_info->is_fab_dev)
			msm_bus_agg_fab_clks(node);
	}

	list_for_each_entry_safe(node, node_tmp, clist, link) {
		/* QoS programming that failed at probe is retried here. */
		if (unlikely(node->node_info->defer_qos))
			msm_bus_dev_init_qos(&node->dev, NULL);

		for (ctx = 0; ctx < NUM_CTX; ctx++) {
			ret = flush_clk_data(node, ctx);
			if (ret)
				MSM_BUS_ERR("%s: Err flushing clk data for:%d",
						__func__, node->node_info->id);
			ret = flush_bw_data(node, ctx);
			if (ret)
				MSM_BUS_ERR("%s: Error flushing bw data for %d",
					__func__, node->node_info->id);
		}
		node->dirty = false;
		list_del_init(&node->link);
	}
	return ret;
}
+
+void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
+ size_t new_size, gfp_t flags)
+{
+ void *ret;
+ size_t copy_size = old_size;
+
+ if (!new_size) {
+ devm_kfree(dev, p);
+ return ZERO_SIZE_PTR;
+ }
+
+ if (new_size < old_size)
+ copy_size = new_size;
+
+ ret = devm_kzalloc(dev, new_size, flags);
+ if (!ret)
+ goto exit_realloc_devmem;
+
+ memcpy(ret, p, copy_size);
+ devm_kfree(dev, p);
+exit_realloc_devmem:
+ return ret;
+}
+
+static void msm_bus_fab_init_noc_ops(struct msm_bus_node_device_type *bus_dev)
+{
+ switch (bus_dev->fabdev->bus_type) {
+ case MSM_BUS_NOC:
+ msm_bus_noc_set_ops(bus_dev);
+ break;
+ case MSM_BUS_BIMC:
+ msm_bus_bimc_set_ops(bus_dev);
+ break;
+ default:
+ MSM_BUS_ERR("%s: Invalid Bus type", __func__);
+ }
+}
+
+static int msm_bus_disable_node_qos_clk(struct msm_bus_node_device_type *node)
+{
+ struct msm_bus_node_device_type *bus_node = NULL;
+ int i;
+ int ret = 0;
+
+ if (!node || (!to_msm_bus_node(node->node_info->bus_device))) {
+ ret = -ENXIO;
+ goto exit_disable_node_qos_clk;
+ }
+
+ for (i = 0; i < node->num_node_qos_clks; i++)
+ ret = disable_nodeclk(&node->node_qos_clks[i]);
+
+ bus_node = to_msm_bus_node(node->node_info->bus_device);
+
+ for (i = 0; i < bus_node->num_node_qos_clks; i++)
+ ret = disable_nodeclk(&bus_node->node_qos_clks[i]);
+
+exit_disable_node_qos_clk:
+ return ret;
+}
+
+static int msm_bus_enable_node_qos_clk(struct msm_bus_node_device_type *node)
+{
+ struct msm_bus_node_device_type *bus_node = NULL;
+ int i;
+ int ret;
+ long rounded_rate;
+
+ if (!node || (!to_msm_bus_node(node->node_info->bus_device))) {
+ ret = -ENXIO;
+ goto exit_enable_node_qos_clk;
+ }
+ bus_node = to_msm_bus_node(node->node_info->bus_device);
+
+ for (i = 0; i < node->num_node_qos_clks; i++) {
+ if (!node->node_qos_clks[i].enable_only_clk) {
+ rounded_rate =
+ clk_round_rate(
+ node->node_qos_clks[i].clk, 1);
+ ret = setrate_nodeclk(&node->node_qos_clks[i],
+ rounded_rate);
+ if (ret)
+ MSM_BUS_DBG("%s: Failed set rate clk,node %d\n",
+ __func__, node->node_info->id);
+ }
+ ret = enable_nodeclk(&node->node_qos_clks[i],
+ node->node_info->bus_device);
+ if (ret) {
+ MSM_BUS_DBG("%s: Failed to set Qos Clks ret %d\n",
+ __func__, ret);
+ msm_bus_disable_node_qos_clk(node);
+ goto exit_enable_node_qos_clk;
+ }
+
+ }
+
+ for (i = 0; i < bus_node->num_node_qos_clks; i++) {
+ if (!bus_node->node_qos_clks[i].enable_only_clk) {
+ rounded_rate =
+ clk_round_rate(
+ bus_node->node_qos_clks[i].clk, 1);
+ ret = setrate_nodeclk(&bus_node->node_qos_clks[i],
+ rounded_rate);
+ if (ret)
+ MSM_BUS_DBG("%s: Failed set rate clk,node %d\n",
+ __func__, node->node_info->id);
+ }
+ ret = enable_nodeclk(&bus_node->node_qos_clks[i],
+ node->node_info->bus_device);
+ if (ret) {
+ MSM_BUS_DBG("%s: Failed to set Qos Clks ret %d\n",
+ __func__, ret);
+ msm_bus_disable_node_qos_clk(node);
+ goto exit_enable_node_qos_clk;
+ }
+
+ }
+exit_enable_node_qos_clk:
+ return ret;
+}
+
+int msm_bus_enable_limiter(struct msm_bus_node_device_type *node_dev,
+ int enable, uint64_t lim_bw)
+{
+ int ret = 0;
+ struct msm_bus_node_device_type *bus_node_dev;
+
+ if (!node_dev) {
+ MSM_BUS_ERR("No device specified");
+ ret = -ENXIO;
+ goto exit_enable_limiter;
+ }
+
+ if (!node_dev->ap_owned) {
+ MSM_BUS_ERR("Device is not AP owned %d",
+ node_dev->node_info->id);
+ ret = -ENXIO;
+ goto exit_enable_limiter;
+ }
+
+ bus_node_dev = to_msm_bus_node(node_dev->node_info->bus_device);
+ if (!bus_node_dev) {
+ MSM_BUS_ERR("Unable to get bus device infofor %d",
+ node_dev->node_info->id);
+ ret = -ENXIO;
+ goto exit_enable_limiter;
+ }
+ if (bus_node_dev->fabdev &&
+ bus_node_dev->fabdev->noc_ops.limit_mport) {
+ if (ret < 0) {
+ MSM_BUS_ERR("Can't Enable QoS clk %d",
+ node_dev->node_info->id);
+ goto exit_enable_limiter;
+ }
+ bus_node_dev->fabdev->noc_ops.limit_mport(
+ node_dev,
+ bus_node_dev->fabdev->qos_base,
+ bus_node_dev->fabdev->base_offset,
+ bus_node_dev->fabdev->qos_off,
+ bus_node_dev->fabdev->qos_freq,
+ enable, lim_bw);
+ }
+
+exit_enable_limiter:
+ return ret;
+}
+
/*
 * msm_bus_dev_init_qos() - Program a node's QoS hardware configuration.
 * @dev:  bus device to configure.
 * @data: unused iterator payload (bus_for_each_dev signature).
 *
 * For AP-owned nodes with a valid QoS mode, enables the QoS clocks,
 * invokes the fabric's qos_init() op, then disables the clocks again. If
 * the QoS clocks cannot be enabled, programming is deferred (defer_qos)
 * and retried at the next commit.
 */
static int msm_bus_dev_init_qos(struct device *dev, void *data)
{
	int ret = 0;
	struct msm_bus_node_device_type *node_dev = NULL;

	node_dev = to_msm_bus_node(dev);
	if (!node_dev) {
		MSM_BUS_ERR("%s: Unable to get node device info", __func__);
		ret = -ENXIO;
		goto exit_init_qos;
	}

	MSM_BUS_DBG("Device = %d", node_dev->node_info->id);

	if (node_dev->ap_owned) {
		struct msm_bus_node_device_type *bus_node_info;

		bus_node_info =
			to_msm_bus_node(node_dev->node_info->bus_device);

		if (!bus_node_info) {
			MSM_BUS_ERR("%s: Unable to get bus device info for %d",
				__func__,
				node_dev->node_info->id);
			ret = -ENXIO;
			goto exit_init_qos;
		}

		if (bus_node_info->fabdev &&
			bus_node_info->fabdev->noc_ops.qos_init) {
			/*
			 * NOTE(review): this inner ret shadows the function
			 * level ret, so errors from this branch are not
			 * reported to the caller — confirm this is intended
			 * (probe deliberately ignores QoS init errors).
			 */
			int ret = 0;

			if (node_dev->ap_owned &&
				(node_dev->node_info->qos_params.mode) != -1) {

				if (bus_node_info->fabdev->bypass_qos_prg)
					goto exit_init_qos;

				ret = msm_bus_enable_node_qos_clk(node_dev);
				if (ret < 0) {
					MSM_BUS_DBG("Can't Enable QoS clk %d\n",
					node_dev->node_info->id);
					/* Retry at the next commit instead. */
					node_dev->node_info->defer_qos = true;
					goto exit_init_qos;
				}

				bus_node_info->fabdev->noc_ops.qos_init(
					node_dev,
					bus_node_info->fabdev->qos_base,
					bus_node_info->fabdev->base_offset,
					bus_node_info->fabdev->qos_off,
					bus_node_info->fabdev->qos_freq);
				ret = msm_bus_disable_node_qos_clk(node_dev);
				node_dev->node_info->defer_qos = false;
			}
		} else
			MSM_BUS_ERR("%s: Skipping QOS init for %d",
				__func__, node_dev->node_info->id);
	}
exit_init_qos:
	return ret;
}
+
/*
 * msm_bus_fabric_init() - Allocate fabric state and map its QoS registers.
 * @dev:   bus device of the fabric node.
 * @pdata: platform data carrying the fabric parameters to copy.
 *
 * Skipped for virtual devices (no QoS hardware). Copies the QoS register
 * geometry from pdata, binds the bus-type ops, and ioremaps the QoS block.
 * All allocations are devm-managed. Returns 0, -ENXIO or -ENOMEM.
 */
static int msm_bus_fabric_init(struct device *dev,
			struct msm_bus_node_device_type *pdata)
{
	struct msm_bus_fab_device_type *fabdev;
	struct msm_bus_node_device_type *node_dev = NULL;
	int ret = 0;

	node_dev = to_msm_bus_node(dev);
	if (!node_dev) {
		MSM_BUS_ERR("%s: Unable to get bus device info", __func__);
		ret = -ENXIO;
		goto exit_fabric_init;
	}

	if (node_dev->node_info->virt_dev) {
		MSM_BUS_ERR("%s: Skip Fab init for virtual device %d", __func__,
						node_dev->node_info->id);
		goto exit_fabric_init;
	}

	fabdev = devm_kzalloc(dev, sizeof(struct msm_bus_fab_device_type),
								GFP_KERNEL);
	if (!fabdev) {
		ret = -ENOMEM;
		goto exit_fabric_init;
	}

	/* Mirror the QoS register geometry from platform data. */
	node_dev->fabdev = fabdev;
	fabdev->pqos_base = pdata->fabdev->pqos_base;
	fabdev->qos_range = pdata->fabdev->qos_range;
	fabdev->base_offset = pdata->fabdev->base_offset;
	fabdev->qos_off = pdata->fabdev->qos_off;
	fabdev->qos_freq = pdata->fabdev->qos_freq;
	fabdev->bus_type = pdata->fabdev->bus_type;
	fabdev->bypass_qos_prg = pdata->fabdev->bypass_qos_prg;
	msm_bus_fab_init_noc_ops(node_dev);

	fabdev->qos_base = devm_ioremap(dev,
				fabdev->pqos_base, fabdev->qos_range);
	if (!fabdev->qos_base) {
		MSM_BUS_ERR("%s: Error remapping address 0x%zx :bus device %d",
			__func__,
			 (size_t)fabdev->pqos_base, node_dev->node_info->id);
		ret = -ENOMEM;
		goto exit_fabric_init;
	}

exit_fabric_init:
	return ret;
}
+
/*
 * msm_bus_init_clk() - Copy clock handles and flags from pdata into a node.
 * @bus_dev: the node's device (owns the devm allocations).
 * @pdata:   platform data carrying the parsed clock information.
 *
 * Copies the per-context node clocks, the optional bus QoS clock and any
 * array of node QoS clocks, resolving the backing regulator for each
 * per-context clock via bus_get_reg(). Returns 0 or -ENOMEM.
 */
static int msm_bus_init_clk(struct device *bus_dev,
			struct msm_bus_node_device_type *pdata)
{
	unsigned int ctx;
	struct msm_bus_node_device_type *node_dev = to_msm_bus_node(bus_dev);
	int i;

	/* Per-context (active/dual) node clocks. */
	for (ctx = 0; ctx < NUM_CTX; ctx++) {
		if (!IS_ERR_OR_NULL(pdata->clk[ctx].clk)) {
			node_dev->clk[ctx].clk = pdata->clk[ctx].clk;
			node_dev->clk[ctx].enable_only_clk =
					pdata->clk[ctx].enable_only_clk;
			node_dev->clk[ctx].setrate_only_clk =
					pdata->clk[ctx].setrate_only_clk;
			node_dev->clk[ctx].enable = false;
			node_dev->clk[ctx].dirty = false;
			strlcpy(node_dev->clk[ctx].reg_name,
				pdata->clk[ctx].reg_name, MAX_REG_NAME);
			node_dev->clk[ctx].reg = NULL;
			bus_get_reg(&node_dev->clk[ctx], bus_dev);
			MSM_BUS_DBG("%s: Valid node clk node %d ctx %d\n",
				__func__, node_dev->node_info->id, ctx);
		}
	}

	/* Optional fabric-wide QoS clock. */
	if (!IS_ERR_OR_NULL(pdata->bus_qos_clk.clk)) {
		node_dev->bus_qos_clk.clk = pdata->bus_qos_clk.clk;
		node_dev->bus_qos_clk.enable_only_clk =
					pdata->bus_qos_clk.enable_only_clk;
		node_dev->bus_qos_clk.setrate_only_clk =
					pdata->bus_qos_clk.setrate_only_clk;
		node_dev->bus_qos_clk.enable = false;
		strlcpy(node_dev->bus_qos_clk.reg_name,
			pdata->bus_qos_clk.reg_name, MAX_REG_NAME);
		node_dev->bus_qos_clk.reg = NULL;
		MSM_BUS_DBG("%s: Valid bus qos clk node %d\n", __func__,
						node_dev->node_info->id);
	}

	/* Per-node QoS clock array, copied element by element. */
	if (pdata->num_node_qos_clks) {
		node_dev->num_node_qos_clks = pdata->num_node_qos_clks;
		node_dev->node_qos_clks = devm_kzalloc(bus_dev,
			(node_dev->num_node_qos_clks * sizeof(struct nodeclk)),
			GFP_KERNEL);
		if (!node_dev->node_qos_clks)
			return -ENOMEM;

		for (i = 0; i < pdata->num_node_qos_clks; i++) {
			node_dev->node_qos_clks[i].clk =
					pdata->node_qos_clks[i].clk;
			node_dev->node_qos_clks[i].enable_only_clk =
					pdata->node_qos_clks[i].enable_only_clk;
			node_dev->node_qos_clks[i].setrate_only_clk =
				pdata->node_qos_clks[i].setrate_only_clk;
			node_dev->node_qos_clks[i].enable = false;
			strlcpy(node_dev->node_qos_clks[i].reg_name,
				pdata->node_qos_clks[i].reg_name, MAX_REG_NAME);
			node_dev->node_qos_clks[i].reg = NULL;
			MSM_BUS_DBG("%s: Valid qos clk[%d] node %d %d Reg%s\n",
					__func__, i,
					node_dev->node_info->id,
					node_dev->num_node_qos_clks,
					node_dev->node_qos_clks[i].reg_name);
		}
	}

	return 0;
}
+
/*
 * msm_bus_copy_node_info() - Deep-copy node platform data into a device.
 * @pdata:   source platform data for the node.
 * @bus_dev: destination device; its node_info must already be allocated.
 *
 * Copies scalar node/QoS/aggregation parameters by value, then deep-copies
 * the variable-length arrays (utilization levels, connections, blacklist,
 * qports) into devm allocations owned by @bus_dev. On allocation failure
 * the arrays allocated so far are freed and -ENOMEM is returned.
 */
static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata,
				struct device *bus_dev)
{
	int ret = 0;
	struct msm_bus_node_info_type *node_info = NULL;
	struct msm_bus_node_info_type *pdata_node_info = NULL;
	struct msm_bus_node_device_type *bus_node = NULL;

	bus_node = to_msm_bus_node(bus_dev);

	if (!bus_node || !pdata) {
		ret = -ENXIO;
		MSM_BUS_ERR("%s: NULL pointers for pdata or bus_node",
			__func__);
		goto exit_copy_node_info;
	}

	node_info = bus_node->node_info;
	pdata_node_info = pdata->node_info;

	/* Scalar identity, topology and QoS/aggregation parameters. */
	node_info->name = pdata_node_info->name;
	node_info->id = pdata_node_info->id;
	node_info->bus_device_id = pdata_node_info->bus_device_id;
	node_info->mas_rpm_id = pdata_node_info->mas_rpm_id;
	node_info->slv_rpm_id = pdata_node_info->slv_rpm_id;
	node_info->num_connections = pdata_node_info->num_connections;
	node_info->num_blist = pdata_node_info->num_blist;
	node_info->num_qports = pdata_node_info->num_qports;
	node_info->virt_dev = pdata_node_info->virt_dev;
	node_info->is_fab_dev = pdata_node_info->is_fab_dev;
	node_info->qos_params.mode = pdata_node_info->qos_params.mode;
	node_info->qos_params.prio1 = pdata_node_info->qos_params.prio1;
	node_info->qos_params.prio0 = pdata_node_info->qos_params.prio0;
	node_info->qos_params.reg_prio1 = pdata_node_info->qos_params.reg_prio1;
	node_info->qos_params.reg_prio0 = pdata_node_info->qos_params.reg_prio0;
	node_info->qos_params.prio_lvl = pdata_node_info->qos_params.prio_lvl;
	node_info->qos_params.prio_rd = pdata_node_info->qos_params.prio_rd;
	node_info->qos_params.prio_wr = pdata_node_info->qos_params.prio_wr;
	node_info->qos_params.gp = pdata_node_info->qos_params.gp;
	node_info->qos_params.thmp = pdata_node_info->qos_params.thmp;
	node_info->qos_params.ws = pdata_node_info->qos_params.ws;
	node_info->qos_params.bw_buffer = pdata_node_info->qos_params.bw_buffer;
	node_info->agg_params.buswidth = pdata_node_info->agg_params.buswidth;
	node_info->agg_params.agg_scheme =
					pdata_node_info->agg_params.agg_scheme;
	node_info->agg_params.vrail_comp =
					pdata_node_info->agg_params.vrail_comp;
	node_info->agg_params.num_aggports =
				pdata_node_info->agg_params.num_aggports;
	node_info->agg_params.num_util_levels =
				pdata_node_info->agg_params.num_util_levels;
	/* Deep copy: utilization levels. */
	node_info->agg_params.util_levels = devm_kzalloc(bus_dev,
			sizeof(struct node_util_levels_type) *
			node_info->agg_params.num_util_levels,
			GFP_KERNEL);
	if (!node_info->agg_params.util_levels) {
		MSM_BUS_ERR("%s: Agg util level alloc failed\n", __func__);
		ret = -ENOMEM;
		goto exit_copy_node_info;
	}
	memcpy(node_info->agg_params.util_levels,
		pdata_node_info->agg_params.util_levels,
		sizeof(struct node_util_levels_type) *
			pdata_node_info->agg_params.num_util_levels);

	/* Deep copy: connection device pointers (filled in later) and ids. */
	node_info->dev_connections = devm_kzalloc(bus_dev,
			sizeof(struct device *) *
				pdata_node_info->num_connections,
			GFP_KERNEL);
	if (!node_info->dev_connections) {
		MSM_BUS_ERR("%s:Bus dev connections alloc failed\n", __func__);
		ret = -ENOMEM;
		goto exit_copy_node_info;
	}

	node_info->connections = devm_kzalloc(bus_dev,
			sizeof(int) * pdata_node_info->num_connections,
			GFP_KERNEL);
	if (!node_info->connections) {
		MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__);
		devm_kfree(bus_dev, node_info->dev_connections);
		ret = -ENOMEM;
		goto exit_copy_node_info;
	}

	memcpy(node_info->connections,
		pdata_node_info->connections,
		sizeof(int) * pdata_node_info->num_connections);

	/* Deep copy: blacklisted connections (devices and ids). */
	node_info->black_connections = devm_kzalloc(bus_dev,
			sizeof(struct device *) *
				pdata_node_info->num_blist,
			GFP_KERNEL);
	if (!node_info->black_connections) {
		MSM_BUS_ERR("%s: Bus black connections alloc failed\n",
			__func__);
		devm_kfree(bus_dev, node_info->dev_connections);
		devm_kfree(bus_dev, node_info->connections);
		ret = -ENOMEM;
		goto exit_copy_node_info;
	}

	node_info->bl_cons = devm_kzalloc(bus_dev,
			pdata_node_info->num_blist * sizeof(int),
			GFP_KERNEL);
	if (!node_info->bl_cons) {
		MSM_BUS_ERR("%s:Bus black list connections alloc failed\n",
			__func__);
		devm_kfree(bus_dev, node_info->black_connections);
		devm_kfree(bus_dev, node_info->dev_connections);
		devm_kfree(bus_dev, node_info->connections);
		ret = -ENOMEM;
		goto exit_copy_node_info;
	}

	memcpy(node_info->bl_cons,
		pdata_node_info->bl_cons,
		sizeof(int) * pdata_node_info->num_blist);

	/* Deep copy: QoS port numbers. */
	node_info->qport = devm_kzalloc(bus_dev,
			sizeof(int) * pdata_node_info->num_qports,
			GFP_KERNEL);
	if (!node_info->qport) {
		MSM_BUS_ERR("%s:Bus qport allocation failed\n", __func__);
		devm_kfree(bus_dev, node_info->dev_connections);
		devm_kfree(bus_dev, node_info->connections);
		devm_kfree(bus_dev, node_info->bl_cons);
		ret = -ENOMEM;
		goto exit_copy_node_info;
	}

	memcpy(node_info->qport,
		pdata_node_info->qport,
		sizeof(int) * pdata_node_info->num_qports);

exit_copy_node_info:
	return ret;
}
+
+static struct device *msm_bus_device_init(
+ struct msm_bus_node_device_type *pdata)
+{
+ struct device *bus_dev = NULL;
+ struct msm_bus_node_device_type *bus_node = NULL;
+ struct msm_bus_node_info_type *node_info = NULL;
+ int ret = 0;
+
+ /**
+ * Init here so we can use devm calls
+ */
+
+ bus_node = kzalloc(sizeof(struct msm_bus_node_device_type), GFP_KERNEL);
+ if (!bus_node) {
+ ret = -ENOMEM;
+ goto err_device_init;
+ }
+ bus_dev = &bus_node->dev;
+ device_initialize(bus_dev);
+
+ node_info = devm_kzalloc(bus_dev,
+ sizeof(struct msm_bus_node_info_type), GFP_KERNEL);
+ if (!node_info) {
+ ret = -ENOMEM;
+ goto err_put_device;
+ }
+
+ bus_node->node_info = node_info;
+ bus_node->ap_owned = pdata->ap_owned;
+ bus_dev->of_node = pdata->of_node;
+
+ ret = msm_bus_copy_node_info(pdata, bus_dev);
+ if (ret)
+ goto err_put_device;
+
+ bus_dev->bus = &msm_bus_type;
+ dev_set_name(bus_dev, bus_node->node_info->name);
+
+ ret = device_add(bus_dev);
+ if (ret) {
+ MSM_BUS_ERR("%s: Error registering device %d",
+ __func__, pdata->node_info->id);
+ goto err_put_device;
+ }
+ device_create_file(bus_dev, &dev_attr_bw);
+ INIT_LIST_HEAD(&bus_node->devlist);
+ return bus_dev;
+
+err_put_device:
+ put_device(bus_dev);
+ bus_dev = NULL;
+ kfree(bus_node);
+err_device_init:
+ return ERR_PTR(ret);
+}
+
/*
 * msm_bus_setup_dev_conn() - Resolve a node's topology ids into devices.
 * @bus_dev: node whose connections should be wired up.
 * @data:    unused iterator payload (bus_for_each_dev signature).
 *
 * Links a non-fabric node onto its parent fabric's device list, then looks
 * up every forward connection and blacklist connection id on the msm bus
 * and caches the resulting device pointers. Returns 0, -ENODEV or -ENXIO
 * when any lookup fails.
 */
static int msm_bus_setup_dev_conn(struct device *bus_dev, void *data)
{
	struct msm_bus_node_device_type *bus_node = NULL;
	int ret = 0;
	int j;
	struct msm_bus_node_device_type *fab;

	bus_node = to_msm_bus_node(bus_dev);
	if (!bus_node) {
		MSM_BUS_ERR("%s: Can't get device info", __func__);
		ret = -ENODEV;
		goto exit_setup_dev_conn;
	}

	/* Setup parent bus device for this node */
	if (!bus_node->node_info->is_fab_dev) {
		struct device *bus_parent_device =
			bus_find_device(&msm_bus_type, NULL,
				(void *)&bus_node->node_info->bus_device_id,
				msm_bus_device_match_adhoc);

		if (!bus_parent_device) {
			MSM_BUS_ERR("%s: Error finding parentdev %d parent %d",
				__func__,
				bus_node->node_info->id,
				bus_node->node_info->bus_device_id);
			ret = -ENXIO;
			goto exit_setup_dev_conn;
		}
		bus_node->node_info->bus_device = bus_parent_device;
		fab = to_msm_bus_node(bus_parent_device);
		list_add_tail(&bus_node->dev_link, &fab->devlist);
	}

	bus_node->node_info->is_traversed = false;

	/* Resolve forward connection ids into device pointers. */
	for (j = 0; j < bus_node->node_info->num_connections; j++) {
		bus_node->node_info->dev_connections[j] =
			bus_find_device(&msm_bus_type, NULL,
				(void *)&bus_node->node_info->connections[j],
				msm_bus_device_match_adhoc);

		if (!bus_node->node_info->dev_connections[j]) {
			MSM_BUS_ERR("%s: Error finding conn %d for device %d",
				__func__, bus_node->node_info->connections[j],
				 bus_node->node_info->id);
			ret = -ENODEV;
			goto exit_setup_dev_conn;
		}
	}

	/* Resolve blacklist connection ids the same way. */
	for (j = 0; j < bus_node->node_info->num_blist; j++) {
		bus_node->node_info->black_connections[j] =
			bus_find_device(&msm_bus_type, NULL,
				(void *)
				&bus_node->node_info->bl_cons[j],
				msm_bus_device_match_adhoc);

		if (!bus_node->node_info->black_connections[j]) {
			MSM_BUS_ERR("%s: Error finding conn %d for device %d\n",
				__func__,
				bus_node->node_info->bl_cons[j],
				bus_node->node_info->id);
			ret = -ENODEV;
			goto exit_setup_dev_conn;
		}
	}

exit_setup_dev_conn:
	return ret;
}
+
+static int msm_bus_node_debug(struct device *bus_dev, void *data)
+{
+ int j;
+ int ret = 0;
+ struct msm_bus_node_device_type *bus_node = NULL;
+
+ bus_node = to_msm_bus_node(bus_dev);
+ if (!bus_node) {
+ MSM_BUS_ERR("%s: Can't get device info", __func__);
+ ret = -ENODEV;
+ goto exit_node_debug;
+ }
+
+ MSM_BUS_DBG("Device = %d buswidth %u", bus_node->node_info->id,
+ bus_node->node_info->agg_params.buswidth);
+ for (j = 0; j < bus_node->node_info->num_connections; j++) {
+ struct msm_bus_node_device_type *bdev =
+ to_msm_bus_node(bus_node->node_info->dev_connections[j]);
+ MSM_BUS_DBG("\n\t Connection[%d] %d", j, bdev->node_info->id);
+ }
+
+ if (bus_node->node_info->is_fab_dev)
+ msm_bus_floor_init(bus_dev);
+
+exit_node_debug:
+ return ret;
+}
+
+static int msm_bus_free_dev(struct device *dev, void *data)
+{
+ struct msm_bus_node_device_type *bus_node = NULL;
+
+ bus_node = to_msm_bus_node(dev);
+
+ if (bus_node)
+ MSM_BUS_ERR("\n%s: Removing device %d", __func__,
+ bus_node->node_info->id);
+ device_unregister(dev);
+ kfree(bus_node);
+ return 0;
+}
+
/* Platform-driver remove: tear down every bus device created at probe. */
int msm_bus_device_remove(struct platform_device *pdev)
{
	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_free_dev);
	return 0;
}
+
+static int msm_bus_device_probe(struct platform_device *pdev)
+{
+ unsigned int i, ret;
+ struct msm_bus_device_node_registration *pdata;
+
+ /* If possible, get pdata from device-tree */
+ if (pdev->dev.of_node)
+ pdata = msm_bus_of_to_pdata(pdev);
+ else {
+ pdata =
+ (struct msm_bus_device_node_registration *)
+ pdev->dev.platform_data;
+ }
+
+ if (IS_ERR_OR_NULL(pdata)) {
+ MSM_BUS_ERR("No platform data found");
+ ret = -ENODATA;
+ goto exit_device_probe;
+ }
+
+ for (i = 0; i < pdata->num_devices; i++) {
+ struct device *node_dev = NULL;
+
+ node_dev = msm_bus_device_init(&pdata->info[i]);
+
+ if (IS_ERR(node_dev)) {
+ MSM_BUS_ERR("%s: Error during dev init for %d",
+ __func__, pdata->info[i].node_info->id);
+ ret = PTR_ERR(node_dev);
+ goto exit_device_probe;
+ }
+
+ ret = msm_bus_init_clk(node_dev, &pdata->info[i]);
+ if (ret) {
+ MSM_BUS_ERR("\n Failed to init bus clk. ret %d", ret);
+ msm_bus_device_remove(pdev);
+ goto exit_device_probe;
+ }
+ /*Is this a fabric device ?*/
+ if (pdata->info[i].node_info->is_fab_dev) {
+ MSM_BUS_DBG("%s: %d is a fab", __func__,
+ pdata->info[i].node_info->id);
+ ret = msm_bus_fabric_init(node_dev, &pdata->info[i]);
+ if (ret) {
+ MSM_BUS_ERR("%s: Error intializing fab %d",
+ __func__, pdata->info[i].node_info->id);
+ goto exit_device_probe;
+ }
+ }
+ }
+
+ ret = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+ msm_bus_setup_dev_conn);
+ if (ret) {
+ MSM_BUS_ERR("%s: Error setting up dev connections", __func__);
+ goto exit_device_probe;
+ }
+
+ /*
+ * Setup the QoS for the nodes, don't check the error codes as we
+ * defer QoS programming to the first transaction in cases of failure
+ * and we want to continue the probe.
+ */
+ ret = bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_dev_init_qos);
+
+ /* Register the arb layer ops */
+ msm_bus_arb_setops_adhoc(&arb_ops);
+ bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_node_debug);
+
+ devm_kfree(&pdev->dev, pdata->info);
+ devm_kfree(&pdev->dev, pdata);
+
+ dev_info(&pdev->dev, "Bus scaling driver probe successful\n");
+
+exit_device_probe:
+ return ret;
+}
+
+static int msm_bus_device_rules_probe(struct platform_device *pdev)
+{
+ struct bus_rule_type *rule_data = NULL;
+ int num_rules = 0;
+
+ num_rules = msm_bus_of_get_static_rules(pdev, &rule_data);
+
+ if (!rule_data)
+ goto exit_rules_probe;
+
+ msm_rule_register(num_rules, rule_data, NULL);
+ static_rules.num_rules = num_rules;
+ static_rules.rules = rule_data;
+ pdev->dev.platform_data = &static_rules;
+
+exit_rules_probe:
+ return 0;
+}
+
+int msm_bus_device_rules_remove(struct platform_device *pdev)
+{
+ struct static_rules_type *static_rules = NULL;
+
+ static_rules = pdev->dev.platform_data;
+ if (static_rules)
+ msm_rule_unregister(static_rules->num_rules,
+ static_rules->rules, NULL);
+ return 0;
+}
+
+
/* OF match table for the static bandwidth-rules device. */
static const struct of_device_id rules_match[] = {
	{.compatible = "qcom,msm-bus-static-bw-rules"},
	{}
};

/* Driver handling static bus-bandwidth rules parsed from DT. */
static struct platform_driver msm_bus_rules_driver = {
	.probe = msm_bus_device_rules_probe,
	.remove = msm_bus_device_rules_remove,
	.driver = {
		.name = "msm_bus_rules_device",
		.of_match_table = rules_match,
	},
};

/* OF match table for the bus fabric device. */
static const struct of_device_id fabric_match[] = {
	{.compatible = "qcom,msm-bus-device"},
	{}
};

/* Driver that instantiates the fabric/node device hierarchy. */
static struct platform_driver msm_bus_device_driver = {
	.probe = msm_bus_device_probe,
	.remove = msm_bus_device_remove,
	.driver = {
		.name = "msm_bus_device",
		.of_match_table = fabric_match,
	},
};
+
+int __init msm_bus_device_init_driver(void)
+{
+ int rc;
+
+ MSM_BUS_ERR("msm_bus_fabric_init_driver\n");
+ rc = platform_driver_register(&msm_bus_device_driver);
+
+ if (rc) {
+ MSM_BUS_ERR("Failed to register bus device driver");
+ return rc;
+ }
+ return platform_driver_register(&msm_bus_rules_driver);
+}
+fs_initcall(msm_bus_device_init_driver);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
new file mode 100644
index 0000000..1665104
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -0,0 +1,2032 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+#include <soc/qcom/tcs.h>
+#include <trace/events/trace_msm_bus.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
+#include "msm_bus_core.h"
+#include "msm_bus_rpmh.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_bimc.h"
+
+#define MSM_BUS_RSC_COUNT (MSM_BUS_RSC_LAST-MSM_BUS_RSC_FIRST+1)
+
+#define BCM_TCS_CMD_COMMIT_SHFT 30
+#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
+#define BCM_TCS_CMD_VALID_SHFT 29
+#define BCM_TCS_CMD_VALID_MASK 0x20000000
+#define BCM_TCS_CMD_VOTE_X_SHFT 14
+#define BCM_TCS_CMD_VOTE_MASK 0x3FFF
+#define BCM_TCS_CMD_VOTE_Y_SHFT 0
+#define BCM_TCS_CMD_VOTE_Y_MASK 0xFFFC000
+
+/*
+ * Pack a BCM vote into a single TCS command word.  All macro
+ * arguments are parenthesised so expression arguments (e.g. "a | b")
+ * expand with the intended precedence.
+ */
+#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
+ ((((commit) & 0x1) << BCM_TCS_CMD_COMMIT_SHFT) |\
+ (((valid) & 0x1) << BCM_TCS_CMD_VALID_SHFT) |\
+ (((vote_x) & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) |\
+ (((vote_y) & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT))
+
+static int msm_bus_dev_init_qos(struct device *dev, void *data);
+static int msm_bus_dev_sbm_config(struct device *dev, bool enable);
+
+/* Per-VCD ordered lists of BCMs used while generating query commands. */
+static struct list_head bcm_query_list_inorder[VCD_MAX_CNT];
+/* RSC targeted by the in-flight commit; reset after each commit. */
+static struct msm_bus_node_device_type *cur_rsc;
+/*
+ * True during init; while set, all-zero votes are handled specially
+ * (see tcs_cmd_list_gen() and bcm_clist_clean()).
+ */
+static bool init_time = true;
+
+/* Maps an RSC node id to the RPMh client device used for voting. */
+struct msm_bus_rsc_client {
+ uint32_t rsc_id;
+ struct device *client;
+};
+
+struct msm_bus_rsc_client rsc_clients[MSM_BUS_RSC_COUNT];
+
+/* Layout of the per-BCM auxiliary data blob read from cmd-db. */
+struct bcm_db {
+ uint32_t unit_size;
+ uint16_t width;
+ uint8_t clk_domain;
+ uint8_t reserved;
+};
+
+/*
+ * sysfs 'bw' show handler: dump per-client and aggregated bandwidth
+ * votes for a bus node.
+ *
+ * Each scnprintf() is bounded by the space remaining in the page
+ * (PAGE_SIZE - off), not the full PAGE_SIZE, so a node with many
+ * clients cannot write past the sysfs buffer.
+ */
+ssize_t bw_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct msm_bus_node_info_type *node_info = NULL;
+ struct msm_bus_node_device_type *bus_node = NULL;
+ int i;
+ int off = 0;
+
+ bus_node = to_msm_bus_node(dev);
+ if (!bus_node)
+ return -EINVAL;
+
+ node_info = bus_node->node_info;
+
+ for (i = 0; i < bus_node->num_lnodes; i++) {
+ if (!bus_node->lnode_list[i].in_use)
+ continue;
+ off += scnprintf((buf + off), PAGE_SIZE - off,
+ "[%d]:%s:Act_IB %llu Act_AB %llu Slp_IB %llu Slp_AB %llu\n",
+ i, bus_node->lnode_list[i].cl_name,
+ bus_node->lnode_list[i].lnode_ib[ACTIVE_CTX],
+ bus_node->lnode_list[i].lnode_ab[ACTIVE_CTX],
+ bus_node->lnode_list[i].lnode_ib[DUAL_CTX],
+ bus_node->lnode_list[i].lnode_ab[DUAL_CTX]);
+ }
+ off += scnprintf((buf + off), PAGE_SIZE - off,
+ "Max_Act_IB %llu Sum_Act_AB %llu Act_Util_fact %d Act_Vrail_comp %d\n",
+ bus_node->node_bw[ACTIVE_CTX].max_ib,
+ bus_node->node_bw[ACTIVE_CTX].sum_ab,
+ bus_node->node_bw[ACTIVE_CTX].util_used,
+ bus_node->node_bw[ACTIVE_CTX].vrail_used);
+ off += scnprintf((buf + off), PAGE_SIZE - off,
+ "Max_Slp_IB %llu Sum_Slp_AB %llu Slp_Util_fact %d Slp_Vrail_comp %d\n",
+ bus_node->node_bw[DUAL_CTX].max_ib,
+ bus_node->node_bw[DUAL_CTX].sum_ab,
+ bus_node->node_bw[DUAL_CTX].util_used,
+ bus_node->node_bw[DUAL_CTX].vrail_used);
+ return off;
+}
+
+/* sysfs 'bw' store handler: votes are read-only here, writes are ignored. */
+ssize_t bw_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return count;
+}
+
+DEVICE_ATTR_RW(bw);
+
+/* Static bandwidth rules parsed from DT at probe time. */
+struct static_rules_type {
+ int num_rules;
+ struct bus_rule_type *rules;
+};
+
+static struct static_rules_type static_rules;
+
+/*
+ * Look up the regulator named in nclk->reg_name via
+ * devm_regulator_get() and cache the handle in nclk->reg.
+ * A node with no regulator name, or with a handle already cached,
+ * is not an error.  Returns 0 on success / nothing-to-do, -ENXIO or
+ * a PTR_ERR value on lookup failure.
+ */
+static int bus_get_reg(struct nodeclk *nclk, struct device *dev)
+{
+ int ret = 0;
+ struct msm_bus_node_device_type *node_dev;
+
+ if (!(dev && nclk))
+ return -ENXIO;
+
+ node_dev = to_msm_bus_node(dev);
+ if (!strlen(nclk->reg_name)) {
+ dev_dbg(dev, "No regulator exist for node %d\n",
+ node_dev->node_info->id);
+ goto exit_of_get_reg;
+ } else {
+ /* Already resolved on a previous call. */
+ if (!(IS_ERR_OR_NULL(nclk->reg)))
+ goto exit_of_get_reg;
+
+ nclk->reg = devm_regulator_get(dev, nclk->reg_name);
+ if (IS_ERR_OR_NULL(nclk->reg)) {
+ ret =
+ (IS_ERR(nclk->reg) ? PTR_ERR(nclk->reg) : -ENXIO);
+ dev_err(dev, "Error: Failed to get regulator %s:%d\n",
+ nclk->reg_name, ret);
+ } else {
+ dev_dbg(dev, "Successfully got regulator for %d\n",
+ node_dev->node_info->id);
+ }
+ }
+
+exit_of_get_reg:
+ return ret;
+}
+
+/*
+ * Enable the regulator cached in @nclk.  Returns -ENXIO if @nclk or
+ * its regulator handle is missing, otherwise the regulator_enable()
+ * result.
+ */
+static int bus_enable_reg(struct nodeclk *nclk)
+{
+ int ret = 0;
+
+ if (!nclk) {
+ ret = -ENXIO;
+ goto exit_bus_enable_reg;
+ }
+
+ if ((IS_ERR_OR_NULL(nclk->reg))) {
+ ret = -ENXIO;
+ goto exit_bus_enable_reg;
+ }
+
+ ret = regulator_enable(nclk->reg);
+ if (ret) {
+ MSM_BUS_ERR("Failed to enable regulator for %s\n",
+ nclk->reg_name);
+ goto exit_bus_enable_reg;
+ }
+ pr_debug("%s: Enabled Reg\n", __func__);
+exit_bus_enable_reg:
+ return ret;
+}
+
+/*
+ * Disable the regulator cached in @nclk.  Returns -ENXIO if @nclk
+ * or its regulator handle is missing; the regulator_disable()
+ * return value itself is deliberately ignored (best-effort).
+ */
+static int bus_disable_reg(struct nodeclk *nclk)
+{
+ int ret = 0;
+
+ if (!nclk) {
+ ret = -ENXIO;
+ goto exit_bus_disable_reg;
+ }
+
+ if ((IS_ERR_OR_NULL(nclk->reg))) {
+ ret = -ENXIO;
+ goto exit_bus_disable_reg;
+ }
+
+ regulator_disable(nclk->reg);
+ pr_debug("%s: Disabled Reg\n", __func__);
+exit_bus_disable_reg:
+ return ret;
+}
+
+/*
+ * Prepare+enable a node clock, first resolving and enabling its
+ * backing regulator when one is named and @dev is provided.
+ * No-op for clocks already enabled or marked setrate_only_clk.
+ * nclk->enable tracks the resulting state.
+ */
+static int enable_nodeclk(struct nodeclk *nclk, struct device *dev)
+{
+ int ret = 0;
+
+ if (!nclk->enable && !nclk->setrate_only_clk) {
+ if (dev && strlen(nclk->reg_name)) {
+ /* Lazily resolve the regulator on first enable. */
+ if (IS_ERR_OR_NULL(nclk->reg)) {
+ ret = bus_get_reg(nclk, dev);
+ if (ret) {
+ dev_dbg(dev,
+ "Failed to get reg.Err %d\n",
+ ret);
+ goto exit_enable_nodeclk;
+ }
+ }
+
+ ret = bus_enable_reg(nclk);
+ if (ret) {
+ dev_dbg(dev, "Failed to enable reg. Err %d\n",
+ ret);
+ goto exit_enable_nodeclk;
+ }
+ }
+ ret = clk_prepare_enable(nclk->clk);
+
+ if (ret) {
+ MSM_BUS_ERR("%s: failed to enable clk ", __func__);
+ nclk->enable = false;
+ } else
+ nclk->enable = true;
+ }
+exit_enable_nodeclk:
+ return ret;
+}
+
+/*
+ * Disable+unprepare an enabled node clock and (best-effort) its
+ * backing regulator.  No-op for disabled or setrate-only clocks.
+ * Always returns 0.
+ */
+static int disable_nodeclk(struct nodeclk *nclk)
+{
+ int ret = 0;
+
+ if (nclk->enable && !nclk->setrate_only_clk) {
+ clk_disable_unprepare(nclk->clk);
+ nclk->enable = false;
+ bus_disable_reg(nclk);
+ }
+ return ret;
+}
+
+/*
+ * Set a node clock's rate.  Clocks marked enable_only_clk are
+ * skipped (returns 0).  Returns the clk_set_rate() result otherwise.
+ */
+static int setrate_nodeclk(struct nodeclk *nclk, long rate)
+{
+ int ret = 0;
+
+ if (!nclk->enable_only_clk)
+ ret = clk_set_rate(nclk->clk, rate);
+
+ if (ret)
+ MSM_BUS_ERR("%s: failed to setrate clk", __func__);
+ return ret;
+}
+
+/*
+ * Fill a single TCS command for @cur_bcm's vote.  An all-zero (x,y)
+ * vote is marked invalid; each vote is clamped to the 14-bit field
+ * width.  @commit also sets the command's completion-wait flag.
+ * Always returns 0; a NULL @cmd is silently ignored.
+ */
+static int tcs_cmd_gen(struct msm_bus_node_device_type *cur_bcm,
+ struct tcs_cmd *cmd, uint64_t vec_a,
+ uint64_t vec_b, bool commit)
+{
+ int ret = 0;
+ bool valid = true;
+
+ if (!cmd)
+ return ret;
+
+ if (vec_a == 0 && vec_b == 0)
+ valid = false;
+
+ if (vec_a > BCM_TCS_CMD_VOTE_MASK)
+ vec_a = BCM_TCS_CMD_VOTE_MASK;
+
+ if (vec_b > BCM_TCS_CMD_VOTE_MASK)
+ vec_b = BCM_TCS_CMD_VOTE_MASK;
+
+ cmd->addr = cur_bcm->bcmdev->addr;
+ cmd->data = BCM_TCS_CMD(commit, valid, vec_a, vec_b);
+ cmd->wait = commit;
+
+ return ret;
+}
+
+/*
+ * Build the active, wake and sleep TCS command lists from the
+ * per-VCD BCM commit lists.
+ *
+ * Pass 1 fills @cmdlist_active, skipping BCMs already marked
+ * updated (or all-zero during init).  If the last BCM of a VCD is
+ * skipped, the previously generated command for that VCD inherits
+ * the commit/wait flags so every VCD still ends with a committed
+ * command.  Pass 2 fills @cmdlist_wake/@cmdlist_sleep only for BCMs
+ * whose dual and active votes differ.  @n_active/@n_wake/@n_sleep
+ * accumulate per-VCD command counts.  Returns the final value of
+ * the running command index k.
+ */
+static int tcs_cmd_list_gen(int *n_active,
+ int *n_wake,
+ int *n_sleep,
+ struct tcs_cmd *cmdlist_active,
+ struct tcs_cmd *cmdlist_wake,
+ struct tcs_cmd *cmdlist_sleep,
+ struct list_head *cur_bcm_clist)
+{
+ struct msm_bus_node_device_type *cur_bcm = NULL;
+ int i = 0;
+ int k = 0;
+ int idx = 0;
+ int last_tcs = -1;
+ bool commit = false;
+
+ if (!cmdlist_active)
+ goto exit_tcs_cmd_list_gen;
+
+ /* Pass 1: active/AMC commands. */
+ for (i = 0; i < VCD_MAX_CNT; i++) {
+ last_tcs = -1;
+ if (list_empty(&cur_bcm_clist[i]))
+ continue;
+ list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
+ if (cur_bcm->updated ||
+ (cur_bcm->node_vec[DUAL_CTX].vec_a == 0 &&
+ cur_bcm->node_vec[ACTIVE_CTX].vec_a == 0 &&
+ cur_bcm->node_vec[DUAL_CTX].vec_b == 0 &&
+ cur_bcm->node_vec[ACTIVE_CTX].vec_b == 0 &&
+ init_time)) {
+ /* Skipped tail: promote previous cmd to commit. */
+ if (last_tcs != -1 &&
+ list_is_last(&cur_bcm->link,
+ &cur_bcm_clist[i])) {
+ cmdlist_active[last_tcs].data |=
+ BCM_TCS_CMD_COMMIT_MASK;
+ cmdlist_active[last_tcs].wait
+ = true;
+ }
+ continue;
+ }
+ n_active[idx]++;
+ commit = false;
+ if (list_is_last(&cur_bcm->link,
+ &cur_bcm_clist[i])) {
+ commit = true;
+ idx++;
+ }
+ tcs_cmd_gen(cur_bcm, &cmdlist_active[k],
+ cur_bcm->node_vec[ACTIVE_CTX].vec_a,
+ cur_bcm->node_vec[ACTIVE_CTX].vec_b, commit);
+ last_tcs = k;
+ k++;
+ cur_bcm->updated = true;
+ }
+ }
+
+ if (!cmdlist_wake || !cmdlist_sleep)
+ goto exit_tcs_cmd_list_gen;
+
+ /* Pass 2: wake/sleep commands, only where the contexts differ. */
+ k = 0;
+ idx = 0;
+ for (i = 0; i < VCD_MAX_CNT; i++) {
+ last_tcs = -1;
+ if (list_empty(&cur_bcm_clist[i]))
+ continue;
+ list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
+ commit = false;
+ if ((cur_bcm->node_vec[DUAL_CTX].vec_a ==
+ cur_bcm->node_vec[ACTIVE_CTX].vec_a) &&
+ (cur_bcm->node_vec[DUAL_CTX].vec_b ==
+ cur_bcm->node_vec[ACTIVE_CTX].vec_b)) {
+ if (last_tcs != -1 &&
+ list_is_last(&cur_bcm->link,
+ &cur_bcm_clist[i])) {
+ cmdlist_wake[last_tcs].data |=
+ BCM_TCS_CMD_COMMIT_MASK;
+ cmdlist_sleep[last_tcs].data |=
+ BCM_TCS_CMD_COMMIT_MASK;
+ cmdlist_wake[last_tcs].wait = true;
+ cmdlist_sleep[last_tcs].wait = true;
+ idx++;
+ }
+ continue;
+ }
+ last_tcs = k;
+ n_sleep[idx]++;
+ n_wake[idx]++;
+ if (list_is_last(&cur_bcm->link,
+ &cur_bcm_clist[i])) {
+ commit = true;
+ idx++;
+ }
+
+ tcs_cmd_gen(cur_bcm, &cmdlist_wake[k],
+ cur_bcm->node_vec[ACTIVE_CTX].vec_a,
+ cur_bcm->node_vec[ACTIVE_CTX].vec_b, commit);
+
+ tcs_cmd_gen(cur_bcm, &cmdlist_sleep[k],
+ cur_bcm->node_vec[DUAL_CTX].vec_a,
+ cur_bcm->node_vec[DUAL_CTX].vec_b, commit);
+ k++;
+ }
+ }
+
+exit_tcs_cmd_list_gen:
+ return k;
+}
+
+/*
+ * Generate TCS commands for every BCM currently on the global query
+ * lists, using each BCM's query vote vectors.  The last command of
+ * each VCD is marked as a commit.  Always returns 0 (the caller
+ * computes the command count separately).
+ */
+static int tcs_cmd_query_list_gen(struct tcs_cmd *cmdlist_active)
+{
+ struct msm_bus_node_device_type *cur_bcm = NULL;
+ struct list_head *bcm_list_inorder = NULL;
+ int i = 0;
+ int k = 0;
+ bool commit = false;
+ int ret = 0;
+
+ if (!cmdlist_active)
+ goto exit_tcs_cmd_list_gen;
+
+ bcm_list_inorder = bcm_query_list_inorder;
+
+ for (i = 0; i < VCD_MAX_CNT; i++) {
+ if (list_empty(&bcm_list_inorder[i]))
+ continue;
+ list_for_each_entry(cur_bcm, &bcm_list_inorder[i], query_link) {
+ commit = false;
+ if (list_is_last(&cur_bcm->query_link,
+ &bcm_list_inorder[i])) {
+ commit = true;
+ }
+ tcs_cmd_gen(cur_bcm, &cmdlist_active[k],
+ cur_bcm->node_vec[ACTIVE_CTX].query_vec_a,
+ cur_bcm->node_vec[ACTIVE_CTX].query_vec_b,
+ commit);
+ k++;
+ }
+ }
+
+exit_tcs_cmd_list_gen:
+ return ret;
+}
+
+/*
+ * Add all BCMs of @cur_dev to the owning RSC's per-VCD commit lists
+ * (once each, tracked via cur_bcm->dirty) and clear their 'updated'
+ * markers so the next commit regenerates their commands.  Also
+ * latches the file-scope cur_rsc from the first BCM seen.
+ *
+ * NOTE(review): a BCM with no RSC devs aborts the whole loop
+ * (goto), skipping any remaining BCMs of this node — verify that
+ * mixed configurations cannot occur.
+ */
+static int bcm_clist_add(struct msm_bus_node_device_type *cur_dev)
+{
+ int ret = 0;
+ int cur_vcd = 0;
+ int i = 0;
+ struct msm_bus_node_device_type *cur_bcm = NULL;
+
+ if (!cur_dev->node_info->num_bcm_devs)
+ goto exit_bcm_clist_add;
+
+ for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+ cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+ cur_vcd = cur_bcm->bcmdev->clk_domain;
+
+ if (!cur_bcm->node_info->num_rsc_devs)
+ goto exit_bcm_clist_add;
+
+ if (!cur_rsc)
+ cur_rsc = to_msm_bus_node(
+ cur_bcm->node_info->rsc_devs[0]);
+
+ if (!cur_bcm->dirty) {
+ list_add_tail(&cur_bcm->link,
+ &cur_rsc->rscdev->bcm_clist[cur_vcd]);
+ cur_bcm->dirty = true;
+ }
+ cur_bcm->updated = false;
+ }
+
+exit_bcm_clist_add:
+ return ret;
+}
+
+/*
+ * Compact the zero-terminated per-VCD command-count array @n so
+ * that no resulting batch entry exceeds MAX_RPMH_PAYLOAD:
+ * consecutive counts are summed in place and a new entry is started
+ * whenever the running sum would overflow the payload limit.  The
+ * compacted array is re-terminated with a zero entry.
+ */
+static void tcs_cmd_n_shrink(int *n)
+{
+ int i = 0, j = 0, sum = 0;
+
+ do {
+ if (sum + n[i] > MAX_RPMH_PAYLOAD) {
+ n[j] = sum;
+ sum = 0;
+ j++;
+ }
+ sum += n[i];
+ } while (n[i++]);
+
+ n[j] = sum;
+ n[j+1] = 0;
+}
+
+/*
+ * Add all BCMs of @cur_dev to the global per-VCD query lists (once
+ * each, tracked via cur_bcm->query_dirty).  Always returns 0.
+ */
+static int bcm_query_list_add(struct msm_bus_node_device_type *cur_dev)
+{
+ int ret = 0;
+ int cur_vcd = 0;
+ int i = 0;
+ struct msm_bus_node_device_type *cur_bcm = NULL;
+
+ if (!cur_dev->node_info->num_bcm_devs)
+ goto exit_bcm_query_list_add;
+
+ for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+ cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+ cur_vcd = cur_bcm->bcmdev->clk_domain;
+
+ if (!cur_bcm->query_dirty) {
+ list_add_tail(&cur_bcm->query_link,
+ &bcm_query_list_inorder[cur_vcd]);
+ cur_bcm->query_dirty = true;
+ }
+ }
+
+exit_bcm_query_list_add:
+ return ret;
+}
+
+/*
+ * Drop @cur_dev's BCMs from the RSC commit lists when all their
+ * votes are zero.  Skipped during init (init_time) so zero votes
+ * are still committed once at boot.  Always returns 0.
+ */
+static int bcm_clist_clean(struct msm_bus_node_device_type *cur_dev)
+{
+ int ret = 0;
+ int i = 0;
+ struct msm_bus_node_device_type *cur_bcm = NULL;
+
+ if (!cur_dev->node_info->num_bcm_devs)
+ goto exit_bcm_clist_clean;
+
+ for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+ cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+
+ if (cur_bcm->node_vec[DUAL_CTX].vec_a == 0 &&
+ cur_bcm->node_vec[ACTIVE_CTX].vec_a == 0 &&
+ cur_bcm->node_vec[DUAL_CTX].vec_b == 0 &&
+ cur_bcm->node_vec[ACTIVE_CTX].vec_b == 0 &&
+ !init_time) {
+ cur_bcm->dirty = false;
+ list_del_init(&cur_bcm->link);
+ }
+ }
+
+exit_bcm_clist_clean:
+ return ret;
+}
+
+/*
+ * Remove all BCMs of @cur_dev from the global query lists and clear
+ * their query_dirty markers.  Always returns 0.
+ *
+ * The exit label is named for this function (the previous
+ * 'exit_bcm_clist_add' was a copy-paste leftover from
+ * bcm_clist_add() and misled readers).
+ */
+static int bcm_query_list_clean(struct msm_bus_node_device_type *cur_dev)
+{
+ int ret = 0;
+ int i = 0;
+ struct msm_bus_node_device_type *cur_bcm = NULL;
+
+ if (!cur_dev->node_info->num_bcm_devs)
+ goto exit_bcm_query_list_clean;
+
+ for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+ cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+
+ cur_bcm->query_dirty = false;
+ list_del_init(&cur_bcm->query_link);
+ }
+
+exit_bcm_query_list_clean:
+ return ret;
+}
+
+/*
+ * Aggregate and commit all pending BCM votes for the nodes on
+ * @clist to the RPMh RSC.
+ *
+ * Flow: fold each node's BCMs into the RSC commit lists (SBM ports
+ * are disabled around the commit), count the active/wake/sleep
+ * commands needed per VCD, generate the TCS command lists, shrink
+ * the batch counts to the RPMh payload limit, then batch-write:
+ * active/awake always, wake and sleep only when some BCM's dual and
+ * active votes differ.  Deferred QoS nodes are then re-initialised,
+ * the commit list is drained, and cur_rsc is reset for the next
+ * commit.
+ */
+int msm_bus_commit_data(struct list_head *clist)
+{
+ int ret = 0;
+ int bcm_cnt;
+ struct msm_bus_node_device_type *node = NULL;
+ struct msm_bus_node_device_type *node_tmp = NULL;
+ struct msm_bus_node_device_type *cur_bcm = NULL;
+ struct tcs_cmd *cmdlist_active = NULL;
+ struct tcs_cmd *cmdlist_wake = NULL;
+ struct tcs_cmd *cmdlist_sleep = NULL;
+ struct device *cur_mbox = NULL;
+ struct list_head *cur_bcm_clist = NULL;
+ int n_active[VCD_MAX_CNT];
+ int n_wake[VCD_MAX_CNT];
+ int n_sleep[VCD_MAX_CNT];
+ int cnt_vcd = 0;
+ int cnt_active = 0;
+ int cnt_wake = 0;
+ int cnt_sleep = 0;
+ int i = 0;
+
+ if (!clist)
+ return ret;
+
+ list_for_each_entry_safe(node, node_tmp, clist, link) {
+ bcm_clist_add(node);
+ msm_bus_dev_sbm_config(&node->dev, false);
+ }
+
+ /* cur_rsc is latched by bcm_clist_add(); without it there is
+ * nothing to commit to.
+ */
+ if (!cur_rsc) {
+ MSM_BUS_ERR("%s: Error for cur_rsc is NULL.\n", __func__);
+ return ret;
+ }
+
+ cur_mbox = cur_rsc->rscdev->mbox;
+ cur_bcm_clist = cur_rsc->rscdev->bcm_clist;
+ cmdlist_active = cur_rsc->rscdev->cmdlist_active;
+ cmdlist_wake = cur_rsc->rscdev->cmdlist_wake;
+ cmdlist_sleep = cur_rsc->rscdev->cmdlist_sleep;
+
+ /* Count the commands each list will need. */
+ for (i = 0; i < VCD_MAX_CNT; i++) {
+ n_active[i] = 0;
+ n_wake[i] = 0;
+ n_sleep[i] = 0;
+
+ if (list_empty(&cur_bcm_clist[i]))
+ continue;
+ list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
+ if ((cur_bcm->node_vec[DUAL_CTX].vec_a !=
+ cur_bcm->node_vec[ACTIVE_CTX].vec_a) ||
+ (cur_bcm->node_vec[DUAL_CTX].vec_b !=
+ cur_bcm->node_vec[ACTIVE_CTX].vec_b)) {
+ cnt_sleep++;
+ cnt_wake++;
+ }
+ if (cur_bcm->updated ||
+ (cur_bcm->node_vec[DUAL_CTX].vec_a == 0 &&
+ cur_bcm->node_vec[ACTIVE_CTX].vec_a == 0 &&
+ cur_bcm->node_vec[DUAL_CTX].vec_b == 0 &&
+ cur_bcm->node_vec[ACTIVE_CTX].vec_b == 0 &&
+ init_time))
+ continue;
+ cnt_active++;
+ }
+ cnt_vcd++;
+ }
+
+ if (!cnt_active)
+ goto exit_msm_bus_commit_data;
+
+ bcm_cnt = tcs_cmd_list_gen(n_active, n_wake, n_sleep, cmdlist_active,
+ cmdlist_wake, cmdlist_sleep, cur_bcm_clist);
+
+ tcs_cmd_n_shrink(n_active);
+ tcs_cmd_n_shrink(n_wake);
+ tcs_cmd_n_shrink(n_sleep);
+
+ ret = rpmh_invalidate(cur_mbox);
+ if (ret)
+ MSM_BUS_ERR("%s: Error invalidating mbox: %d\n",
+ __func__, ret);
+
+ if (cur_rsc->node_info->id == MSM_BUS_RSC_DISP) {
+ ret = rpmh_write_batch(cur_mbox, cur_rsc->rscdev->req_state,
+ cmdlist_active, n_active);
+ /*
+ * Ignore -EBUSY from rpmh_write if it's an AMC
+ * request to Display RSC which are invalid when
+ * the display RSC is in solver mode and the bus driver
+ * does not know the current state of the display RSC.
+ */
+ if (ret && ret != -EBUSY)
+ MSM_BUS_ERR("%s: error sending active/awake sets: %d\n",
+ __func__, ret);
+ } else {
+ ret = rpmh_write_batch(cur_mbox, cur_rsc->rscdev->req_state,
+ cmdlist_active, n_active);
+ if (ret)
+ MSM_BUS_ERR("%s: error sending active/awake sets: %d\n",
+ __func__, ret);
+ }
+ if (cnt_wake) {
+ ret = rpmh_write_batch(cur_mbox, RPMH_WAKE_ONLY_STATE,
+ cmdlist_wake, n_wake);
+ if (ret)
+ MSM_BUS_ERR("%s: error sending wake sets: %d\n",
+ __func__, ret);
+ }
+ if (cnt_sleep) {
+ ret = rpmh_write_batch(cur_mbox, RPMH_SLEEP_STATE,
+ cmdlist_sleep, n_sleep);
+ if (ret)
+ MSM_BUS_ERR("%s: error sending sleep sets: %d\n",
+ __func__, ret);
+ }
+
+ list_for_each_entry_safe(node, node_tmp, clist, link) {
+ if (unlikely(node->node_info->defer_qos))
+ msm_bus_dev_init_qos(&node->dev, NULL);
+ msm_bus_dev_sbm_config(&node->dev, true);
+ }
+
+exit_msm_bus_commit_data:
+ list_for_each_entry_safe(node, node_tmp, clist, link) {
+ bcm_clist_clean(node);
+ node->dirty = false;
+ list_del_init(&node->link);
+ }
+ cur_rsc = NULL;
+ return ret;
+}
+
+/*
+ * Generate a TCS command list describing the aggregated query votes
+ * for all BCMs reachable from @query_list.
+ *
+ * Counts the queued query BCMs per VCD, fills @tcs_usecase with the
+ * command count and generated commands, then tears the temporary
+ * query bookkeeping back down.  (The previous 'n_active' pointer
+ * was never allocated and its kfree(NULL) was a no-op; both are
+ * removed.)
+ */
+int msm_bus_query_gen(struct list_head *query_list,
+ struct msm_bus_tcs_usecase *tcs_usecase)
+{
+ int ret = 0;
+ struct msm_bus_node_device_type *node = NULL;
+ struct msm_bus_node_device_type *node_tmp = NULL;
+ struct msm_bus_node_device_type *cur_bcm = NULL;
+ int cnt_vcd = 0;
+ int cnt_active = 0;
+ int i = 0;
+
+ list_for_each_entry_safe(node, node_tmp, query_list, query_link)
+ bcm_query_list_add(node);
+
+ for (i = 0; i < VCD_MAX_CNT; i++) {
+ if (list_empty(&bcm_query_list_inorder[i]))
+ continue;
+ list_for_each_entry(cur_bcm, &bcm_query_list_inorder[i],
+ query_link) {
+ cnt_active++;
+ }
+ cnt_vcd++;
+ }
+
+ tcs_usecase->num_cmds = cnt_active;
+ ret = tcs_cmd_query_list_gen(tcs_usecase->cmds);
+
+ list_for_each_entry_safe(node, node_tmp, query_list, query_link) {
+ bcm_query_list_clean(node);
+ node->query_dirty = false;
+ list_del_init(&node->query_link);
+ }
+
+ return ret;
+}
+
+/*
+ * Send one immediate (active-only) RPMh vote for a single BCM,
+ * bypassing the batched commit path.  Used for QoS BCM votes.
+ *
+ * NOTE(review): the local 'cur_rsc' intentionally shadows the
+ * file-scope 'cur_rsc' used by the commit path — confirm this is
+ * deliberate before renaming either.
+ */
+static void bcm_commit_single_req(struct msm_bus_node_device_type *cur_bcm,
+ uint64_t vec_a, uint64_t vec_b)
+{
+ struct msm_bus_node_device_type *cur_rsc = NULL;
+ struct device *cur_mbox = NULL;
+ struct tcs_cmd cmd_active;
+
+ if (!cur_bcm->node_info->num_rsc_devs)
+ return;
+
+ cur_rsc = to_msm_bus_node(cur_bcm->node_info->rsc_devs[0]);
+ cur_mbox = cur_rsc->rscdev->mbox;
+
+ tcs_cmd_gen(cur_bcm, &cmd_active, vec_a, vec_b, true);
+ rpmh_write(cur_mbox, RPMH_ACTIVE_ONLY_STATE, &cmd_active, 1);
+}
+
+/*
+ * Device-managed realloc: allocate a zeroed buffer of @new_size,
+ * copy min(old_size, new_size) bytes from @p, and free @p.
+ * A zero @new_size frees @p and returns ZERO_SIZE_PTR.  On
+ * allocation failure, returns NULL and @p is left allocated.
+ */
+void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
+ size_t new_size, gfp_t flags)
+{
+ void *ret;
+ size_t copy_size = old_size;
+
+ if (!new_size) {
+ devm_kfree(dev, p);
+ return ZERO_SIZE_PTR;
+ }
+
+ if (new_size < old_size)
+ copy_size = new_size;
+
+ ret = devm_kzalloc(dev, new_size, flags);
+ if (!ret) {
+ MSM_BUS_ERR("%s: Error Reallocating memory", __func__);
+ goto exit_realloc_devmem;
+ }
+
+ memcpy(ret, p, copy_size);
+ devm_kfree(dev, p);
+exit_realloc_devmem:
+ return ret;
+}
+
+/*
+ * Install the interconnect-specific ops table (NOC or BIMC) on a
+ * fabric node based on its DT-provided bus type.
+ */
+static void msm_bus_fab_init_noc_ops(struct msm_bus_node_device_type *bus_dev)
+{
+ switch (bus_dev->fabdev->bus_type) {
+ case MSM_BUS_NOC:
+ msm_bus_noc_set_ops(bus_dev);
+ break;
+ case MSM_BUS_BIMC:
+ msm_bus_bimc_set_ops(bus_dev);
+ break;
+ default:
+ MSM_BUS_ERR("%s: Invalid Bus type", __func__);
+ }
+}
+
+/*
+ * Disable all of a node's QoS clocks.  Note that 'ret' is
+ * overwritten each iteration, so the return value reflects only the
+ * last clock (disable_nodeclk() currently always returns 0).
+ */
+static int msm_bus_disable_node_qos_clk(struct msm_bus_node_device_type *node)
+{
+ int i;
+ int ret = 0;
+
+ if (!node) {
+ ret = -ENXIO;
+ goto exit_disable_node_qos_clk;
+ }
+
+ for (i = 0; i < node->num_node_qos_clks; i++)
+ ret = disable_nodeclk(&node->node_qos_clks[i]);
+
+exit_disable_node_qos_clk:
+ return ret;
+}
+
+/*
+ * Enable all of a node's QoS clocks, setting each to its minimum
+ * rounded rate first (unless it is enable-only).  On any enable
+ * failure, already-enabled QoS clocks are rolled back and the error
+ * is returned.
+ */
+static int msm_bus_enable_node_qos_clk(struct msm_bus_node_device_type *node)
+{
+ int i;
+ int ret = 0;
+ long rounded_rate;
+
+ for (i = 0; i < node->num_node_qos_clks; i++) {
+ if (!node->node_qos_clks[i].enable_only_clk) {
+ /* clk_round_rate(clk, 1): lowest supported rate. */
+ rounded_rate =
+ clk_round_rate(
+ node->node_qos_clks[i].clk, 1);
+ ret = setrate_nodeclk(&node->node_qos_clks[i],
+ rounded_rate);
+ if (ret)
+ MSM_BUS_DBG("%s: Failed set rate clk,node %d\n",
+ __func__, node->node_info->id);
+ }
+ ret = enable_nodeclk(&node->node_qos_clks[i],
+ node->node_info->bus_device);
+ if (ret) {
+ MSM_BUS_DBG("%s: Failed to set Qos Clks ret %d\n",
+ __func__, ret);
+ msm_bus_disable_node_qos_clk(node);
+ goto exit_enable_node_qos_clk;
+ }
+ }
+exit_enable_node_qos_clk:
+ return ret;
+}
+
+/*
+ * Cast immediate votes on the QoS BCMs of @node so its QoS
+ * registers can be programmed.
+ *
+ * NOTE(review): if any QoS BCM already carries a non-zero vote, the
+ * function returns 0 immediately, skipping the remaining QoS BCMs —
+ * confirm this early return (rather than 'continue') is intended.
+ */
+static int msm_bus_vote_qos_bcms(struct msm_bus_node_device_type *node)
+{
+ struct msm_bus_node_device_type *cur_dev = NULL;
+ struct msm_bus_node_device_type *cur_bcm = NULL;
+ int i;
+ struct device *dev = NULL;
+
+ if (!node || (!to_msm_bus_node(node->node_info->bus_device)))
+ return -ENXIO;
+
+ cur_dev = node;
+
+ for (i = 0; i < cur_dev->num_qos_bcms; i++) {
+ dev = bus_find_device(&msm_bus_type, NULL,
+ (void *) &cur_dev->qos_bcms[i].qos_bcm_id,
+ msm_bus_device_match_adhoc);
+
+ if (!dev) {
+ MSM_BUS_ERR("Can't find dev node for %d",
+ cur_dev->qos_bcms[i].qos_bcm_id);
+ return -ENODEV;
+ }
+
+ cur_bcm = to_msm_bus_node(dev);
+ if (cur_bcm->node_vec[ACTIVE_CTX].vec_a != 0 ||
+ cur_bcm->node_vec[ACTIVE_CTX].vec_b != 0 ||
+ cur_bcm->node_vec[DUAL_CTX].vec_a != 0 ||
+ cur_bcm->node_vec[DUAL_CTX].vec_b != 0)
+ return 0;
+
+ bcm_commit_single_req(cur_bcm,
+ cur_dev->qos_bcms[i].vec.vec_a,
+ cur_dev->qos_bcms[i].vec.vec_b);
+ }
+
+ return 0;
+}
+
+/*
+ * Remove the immediate QoS BCM votes placed by
+ * msm_bus_vote_qos_bcms() (commits zero votes).  Mirrors that
+ * function, including its early 'return 0' when a QoS BCM carries a
+ * non-zero regular vote.
+ */
+static int msm_bus_rm_vote_qos_bcms(struct msm_bus_node_device_type *node)
+{
+ struct msm_bus_node_device_type *cur_dev = NULL;
+ struct msm_bus_node_device_type *cur_bcm = NULL;
+ int i;
+ struct device *dev = NULL;
+
+ if (!node || (!to_msm_bus_node(node->node_info->bus_device)))
+ return -ENXIO;
+
+ cur_dev = node;
+
+ for (i = 0; i < cur_dev->num_qos_bcms; i++) {
+ dev = bus_find_device(&msm_bus_type, NULL,
+ (void *) &cur_dev->qos_bcms[i].qos_bcm_id,
+ msm_bus_device_match_adhoc);
+
+ if (!dev) {
+ MSM_BUS_ERR("Can't find dev node for %d",
+ cur_dev->qos_bcms[i].qos_bcm_id);
+ return -ENODEV;
+ }
+
+ cur_bcm = to_msm_bus_node(dev);
+ if (cur_bcm->node_vec[ACTIVE_CTX].vec_a != 0 ||
+ cur_bcm->node_vec[ACTIVE_CTX].vec_b != 0 ||
+ cur_bcm->node_vec[DUAL_CTX].vec_a != 0 ||
+ cur_bcm->node_vec[DUAL_CTX].vec_b != 0)
+ return 0;
+
+ bcm_commit_single_req(cur_bcm, 0, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * Enable or disable the bandwidth limiter on an AP-owned master
+ * port, limiting it to @lim_bw when enabled.
+ *
+ * Returns 0 on success or -ENXIO when the node is missing, not
+ * AP-owned, or has no parent bus device.  (A dead 'if (ret < 0)'
+ * check — ret was provably 0 at that point — has been removed.)
+ */
+int msm_bus_enable_limiter(struct msm_bus_node_device_type *node_dev,
+ int enable, uint64_t lim_bw)
+{
+ int ret = 0;
+ struct msm_bus_node_device_type *bus_node_dev;
+
+ if (!node_dev) {
+ MSM_BUS_ERR("No device specified");
+ ret = -ENXIO;
+ goto exit_enable_limiter;
+ }
+
+ if (!node_dev->ap_owned) {
+ MSM_BUS_ERR("Device is not AP owned %d",
+ node_dev->node_info->id);
+ ret = -ENXIO;
+ goto exit_enable_limiter;
+ }
+
+ bus_node_dev = to_msm_bus_node(node_dev->node_info->bus_device);
+ if (!bus_node_dev) {
+ MSM_BUS_ERR("Unable to get bus device info for %d",
+ node_dev->node_info->id);
+ ret = -ENXIO;
+ goto exit_enable_limiter;
+ }
+ if (bus_node_dev->fabdev &&
+ bus_node_dev->fabdev->noc_ops.limit_mport) {
+ bus_node_dev->fabdev->noc_ops.limit_mport(
+ node_dev,
+ bus_node_dev->fabdev->qos_base,
+ bus_node_dev->fabdev->base_offset,
+ bus_node_dev->fabdev->qos_off,
+ bus_node_dev->fabdev->qos_freq,
+ enable, lim_bw);
+ }
+
+exit_enable_limiter:
+ return ret;
+}
+
+/*
+ * Program QoS settings for a bus node.  QoS init is deferred when
+ * the node requests it (defer_init_qos) or when its QoS clocks
+ * cannot yet be enabled; msm_bus_commit_data() retries deferred
+ * nodes.  Deferral is not reported as an error.
+ *
+ * QoS-programming status lives in 'qos_ret', distinct from the
+ * function-level 'ret'.  (The previous inner 'int ret = 0;'
+ * shadowed the outer variable and misled readers about what the
+ * function returns; the redundant nested ap_owned check inside the
+ * already-guarded branch is also removed.  Behavior is unchanged.)
+ */
+static int msm_bus_dev_init_qos(struct device *dev, void *data)
+{
+ int ret = 0;
+ struct msm_bus_node_device_type *node_dev = NULL;
+
+ node_dev = to_msm_bus_node(dev);
+ if (!node_dev) {
+ MSM_BUS_ERR("%s: Unable to get node device info", __func__);
+ ret = -ENXIO;
+ goto exit_init_qos;
+ }
+
+ MSM_BUS_DBG("Device = %d", node_dev->node_info->id);
+
+ if (node_dev->node_info->qos_params.defer_init_qos) {
+ node_dev->node_info->qos_params.defer_init_qos = false;
+ node_dev->node_info->defer_qos = true;
+ goto exit_init_qos;
+ }
+
+ if (node_dev->ap_owned) {
+ struct msm_bus_node_device_type *bus_node_info;
+
+ bus_node_info =
+ to_msm_bus_node(node_dev->node_info->bus_device);
+
+ if (!bus_node_info) {
+ MSM_BUS_ERR("%s: Unable to get bus device info for %d",
+ __func__,
+ node_dev->node_info->id);
+ ret = -ENXIO;
+ goto exit_init_qos;
+ }
+
+ if (bus_node_info->fabdev &&
+ bus_node_info->fabdev->noc_ops.qos_init) {
+ int qos_ret = 0;
+
+ if (bus_node_info->fabdev->bypass_qos_prg)
+ goto exit_init_qos;
+
+ qos_ret = msm_bus_vote_qos_bcms(node_dev);
+ qos_ret = msm_bus_enable_node_qos_clk(node_dev);
+ if (qos_ret < 0) {
+ MSM_BUS_DBG("Can't Enable QoS clk %d\n",
+ node_dev->node_info->id);
+ node_dev->node_info->defer_qos = true;
+ goto exit_init_qos;
+ }
+
+ bus_node_info->fabdev->noc_ops.qos_init(
+ node_dev,
+ bus_node_info->fabdev->qos_base,
+ bus_node_info->fabdev->base_offset,
+ bus_node_info->fabdev->qos_off,
+ bus_node_info->fabdev->qos_freq);
+ qos_ret = msm_bus_disable_node_qos_clk(node_dev);
+ qos_ret = msm_bus_rm_vote_qos_bcms(node_dev);
+ node_dev->node_info->defer_qos = false;
+ } else
+ MSM_BUS_ERR("%s: Skipping QOS init for %d",
+ __func__, node_dev->node_info->id);
+ }
+exit_init_qos:
+ return ret;
+}
+
+/*
+ * Connect (@enable=true) or disconnect (@enable=false) a node's
+ * disable-able SBM ports around a bandwidth commit.
+ *
+ * A node is only disconnected when it holds no DUAL_CTX bandwidth
+ * and is currently connected; only connected when it holds some
+ * DUAL_CTX bandwidth and is currently disconnected.  Regulators
+ * listed in node_regs are lazily resolved and enabled while the
+ * ports are being reconfigured, then disabled again on disconnect.
+ */
+static int msm_bus_dev_sbm_config(struct device *dev, bool enable)
+{
+ int ret = 0, idx = 0;
+ struct msm_bus_node_device_type *node_dev = NULL;
+ struct msm_bus_node_device_type *fab_dev = NULL;
+
+ node_dev = to_msm_bus_node(dev);
+ if (!node_dev) {
+ MSM_BUS_ERR("%s: Unable to get node device info", __func__);
+ return -ENXIO;
+ }
+
+ if (!node_dev->node_info->num_disable_ports)
+ return 0;
+
+ /* Skip when the requested state is already in effect. */
+ if ((node_dev->node_bw[DUAL_CTX].sum_ab ||
+ node_dev->node_bw[DUAL_CTX].max_ib ||
+ !node_dev->is_connected) && !enable)
+ return 0;
+ else if (((!node_dev->node_bw[DUAL_CTX].sum_ab &&
+ !node_dev->node_bw[DUAL_CTX].max_ib) ||
+ node_dev->is_connected) && enable)
+ return 0;
+
+ if (enable) {
+ for (idx = 0; idx < node_dev->num_regs; idx++) {
+ if (!node_dev->node_regs[idx].reg)
+ node_dev->node_regs[idx].reg =
+ devm_regulator_get(dev,
+ node_dev->node_regs[idx].name);
+
+ if ((IS_ERR_OR_NULL(node_dev->node_regs[idx].reg)))
+ return -ENXIO;
+ ret = regulator_enable(node_dev->node_regs[idx].reg);
+ if (ret) {
+ MSM_BUS_ERR("%s: Failed to enable reg:%s\n",
+ __func__, node_dev->node_regs[idx].name);
+ return ret;
+ }
+ }
+ node_dev->is_connected = true;
+ }
+
+ fab_dev = to_msm_bus_node(node_dev->node_info->bus_device);
+ if (!fab_dev) {
+ MSM_BUS_ERR("%s: Unable to get bus device info for %d",
+ __func__,
+ node_dev->node_info->id);
+ return -ENXIO;
+ }
+
+ if (fab_dev->fabdev &&
+ fab_dev->fabdev->noc_ops.sbm_config) {
+ ret = fab_dev->fabdev->noc_ops.sbm_config(
+ node_dev,
+ fab_dev->fabdev->qos_base,
+ fab_dev->fabdev->sbm_offset,
+ enable);
+ }
+
+ if (!enable) {
+ for (idx = 0; idx < node_dev->num_regs; idx++) {
+ if (!node_dev->node_regs[idx].reg)
+ node_dev->node_regs[idx].reg =
+ devm_regulator_get(dev,
+ node_dev->node_regs[idx].name);
+
+ if ((IS_ERR_OR_NULL(node_dev->node_regs[idx].reg)))
+ return -ENXIO;
+ ret = regulator_disable(node_dev->node_regs[idx].reg);
+ if (ret) {
+ MSM_BUS_ERR("%s: Failed to disable reg:%s\n",
+ __func__, node_dev->node_regs[idx].name);
+ return ret;
+ }
+ }
+ node_dev->is_connected = false;
+ }
+ return ret;
+}
+
+/*
+ * Allocate and populate the fabric-specific device data from
+ * DT-derived @pdata, install the NOC/BIMC ops, and map the QoS
+ * register space.  Virtual devices are skipped (returns 0).
+ */
+static int msm_bus_fabric_init(struct device *dev,
+ struct msm_bus_node_device_type *pdata)
+{
+ struct msm_bus_fab_device_type *fabdev;
+ struct msm_bus_node_device_type *node_dev = NULL;
+ int ret = 0;
+
+ node_dev = to_msm_bus_node(dev);
+ if (!node_dev) {
+ MSM_BUS_ERR("%s: Unable to get bus device info", __func__);
+ ret = -ENXIO;
+ goto exit_fabric_init;
+ }
+
+ if (node_dev->node_info->virt_dev) {
+ MSM_BUS_ERR("%s: Skip Fab init for virtual device %d", __func__,
+ node_dev->node_info->id);
+ goto exit_fabric_init;
+ }
+
+ fabdev = devm_kzalloc(dev, sizeof(struct msm_bus_fab_device_type),
+ GFP_KERNEL);
+ if (!fabdev) {
+ MSM_BUS_ERR("Fabric alloc failed\n");
+ ret = -ENOMEM;
+ goto exit_fabric_init;
+ }
+
+ node_dev->fabdev = fabdev;
+ fabdev->pqos_base = pdata->fabdev->pqos_base;
+ fabdev->qos_range = pdata->fabdev->qos_range;
+ fabdev->base_offset = pdata->fabdev->base_offset;
+ fabdev->qos_off = pdata->fabdev->qos_off;
+ fabdev->qos_freq = pdata->fabdev->qos_freq;
+ fabdev->bus_type = pdata->fabdev->bus_type;
+ fabdev->bypass_qos_prg = pdata->fabdev->bypass_qos_prg;
+ fabdev->sbm_offset = pdata->fabdev->sbm_offset;
+ msm_bus_fab_init_noc_ops(node_dev);
+
+ fabdev->qos_base = devm_ioremap(dev,
+ fabdev->pqos_base, fabdev->qos_range);
+ if (!fabdev->qos_base) {
+ MSM_BUS_ERR("%s: Error remapping address 0x%zx :bus device %d",
+ __func__,
+ (size_t)fabdev->pqos_base, node_dev->node_info->id);
+ ret = -ENOMEM;
+ goto exit_fabric_init;
+ }
+
+exit_fabric_init:
+ return ret;
+}
+
+/*
+ * Allocate the per-BCM device data and populate it from the RPMh
+ * command database: address, vote width, clock domain and unit size
+ * come from the BCM's cmd-db aux-data blob.
+ *
+ * NOTE(review): the global bcm_query_list_inorder heads are
+ * re-initialised on every BCM init — harmless at probe time but
+ * verify no query state can exist when a later BCM is initialised.
+ */
+static int msm_bus_bcm_init(struct device *dev,
+ struct msm_bus_node_device_type *pdata)
+{
+ struct msm_bus_bcm_device_type *bcmdev;
+ struct msm_bus_node_device_type *node_dev = NULL;
+ struct bcm_db aux_data = {0};
+ int ret = 0;
+ int i = 0;
+
+ node_dev = to_msm_bus_node(dev);
+ if (!node_dev) {
+ ret = -ENXIO;
+ goto exit_bcm_init;
+ }
+
+ bcmdev = devm_kzalloc(dev, sizeof(struct msm_bus_bcm_device_type),
+ GFP_KERNEL);
+ if (!bcmdev) {
+ ret = -ENOMEM;
+ goto exit_bcm_init;
+ }
+
+ node_dev->bcmdev = bcmdev;
+ bcmdev->name = pdata->bcmdev->name;
+ if (!cmd_db_read_aux_data_len(bcmdev->name)) {
+ MSM_BUS_ERR("%s: Error getting bcm info, bcm:%s",
+ __func__, bcmdev->name);
+ ret = -ENXIO;
+ goto exit_bcm_init;
+ }
+
+ cmd_db_read_aux_data(bcmdev->name, (u8 *)&aux_data,
+ sizeof(struct bcm_db));
+ bcmdev->addr = cmd_db_read_addr(bcmdev->name);
+ bcmdev->width = (uint32_t)aux_data.width;
+ bcmdev->clk_domain = aux_data.clk_domain;
+ bcmdev->unit_size = aux_data.unit_size;
+ bcmdev->type = 0;
+ bcmdev->num_bus_devs = 0;
+
+ // Add way to count # of VCDs, initialize LL
+ for (i = 0; i < VCD_MAX_CNT; i++)
+ INIT_LIST_HEAD(&bcm_query_list_inorder[i]);
+
+exit_bcm_init:
+ return ret;
+}
+
+/*
+ * Allocate the per-RSC device data: record the requested RPMh state,
+ * bind the RPMh mailbox client previously registered for this RSC id
+ * in rsc_clients[], and initialise the per-VCD BCM commit lists.
+ * A missing mailbox is logged but not fatal.
+ */
+static int msm_bus_rsc_init(struct platform_device *pdev,
+ struct device *dev,
+ struct msm_bus_node_device_type *pdata)
+{
+ struct msm_bus_rsc_device_type *rscdev;
+ struct msm_bus_node_device_type *node_dev = NULL;
+ int ret = 0;
+ int i = 0;
+
+ node_dev = to_msm_bus_node(dev);
+ if (!node_dev) {
+ ret = -ENXIO;
+ goto exit_rsc_init;
+ }
+
+ rscdev = devm_kzalloc(dev, sizeof(struct msm_bus_rsc_device_type),
+ GFP_KERNEL);
+ if (!rscdev) {
+ ret = -ENOMEM;
+ goto exit_rsc_init;
+ }
+
+ node_dev->rscdev = rscdev;
+ rscdev->req_state = pdata->rscdev->req_state;
+
+ for (i = 0; i < MSM_BUS_RSC_COUNT; i++) {
+ if (rsc_clients[i].rsc_id == node_dev->node_info->id) {
+ rscdev->mbox = rsc_clients[i].client;
+
+ if (IS_ERR_OR_NULL(rscdev->mbox)) {
+ MSM_BUS_ERR("%s: Failed to get mbox:%s",
+ __func__, node_dev->node_info->name);
+ }
+ break;
+ }
+ }
+
+
+ // Add way to count # of VCDs, initialize LL
+ for (i = 0; i < VCD_MAX_CNT; i++)
+ INIT_LIST_HEAD(&rscdev->bcm_clist[i]);
+
+exit_rsc_init:
+ return ret;
+}
+
+/*
+ * Post-connection setup callback: for RSC devices, allocate the
+ * active/wake/sleep TCS command arrays sized by the number of BCMs
+ * the RSC owns.  Non-RSC devices are a no-op.
+ */
+static int msm_bus_postcon_setup(struct device *bus_dev, void *data)
+{
+ struct msm_bus_node_device_type *bus_node = NULL;
+ struct msm_bus_rsc_device_type *rscdev;
+
+ bus_node = to_msm_bus_node(bus_dev);
+ if (!bus_node) {
+ MSM_BUS_ERR("%s: Can't get device info", __func__);
+ return -ENODEV;
+ }
+
+ if (bus_node->node_info->is_rsc_dev) {
+ rscdev = bus_node->rscdev;
+ rscdev->cmdlist_active = devm_kcalloc(bus_dev,
+ rscdev->num_bcm_devs,
+ sizeof(struct tcs_cmd), GFP_KERNEL);
+ if (!rscdev->cmdlist_active)
+ return -ENOMEM;
+
+ rscdev->cmdlist_wake = devm_kcalloc(bus_dev,
+ rscdev->num_bcm_devs,
+ sizeof(struct tcs_cmd), GFP_KERNEL);
+ if (!rscdev->cmdlist_wake)
+ return -ENOMEM;
+
+ rscdev->cmdlist_sleep = devm_kcalloc(bus_dev,
+ rscdev->num_bcm_devs,
+ sizeof(struct tcs_cmd), GFP_KERNEL);
+ if (!rscdev->cmdlist_sleep)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Copy clock handles and regulator names from the DT-derived
+ * @pdata into the live node device: per-context bus clocks, the
+ * optional bus QoS clock, and any per-node QoS clocks.
+ *
+ * Returns 0 on success or -ENOMEM if the QoS clock array cannot be
+ * allocated.  The array allocation uses devm_kcalloc() (overflow-
+ * checked), matching msm_bus_postcon_setup().
+ */
+static int msm_bus_init_clk(struct device *bus_dev,
+ struct msm_bus_node_device_type *pdata)
+{
+ unsigned int ctx;
+ struct msm_bus_node_device_type *node_dev = to_msm_bus_node(bus_dev);
+ int i;
+
+ for (ctx = 0; ctx < NUM_CTX; ctx++) {
+ if (!IS_ERR_OR_NULL(pdata->clk[ctx].clk)) {
+ node_dev->clk[ctx].clk = pdata->clk[ctx].clk;
+ node_dev->clk[ctx].enable_only_clk =
+ pdata->clk[ctx].enable_only_clk;
+ node_dev->clk[ctx].setrate_only_clk =
+ pdata->clk[ctx].setrate_only_clk;
+ node_dev->clk[ctx].enable = false;
+ node_dev->clk[ctx].dirty = false;
+ strlcpy(node_dev->clk[ctx].reg_name,
+ pdata->clk[ctx].reg_name, MAX_REG_NAME);
+ node_dev->clk[ctx].reg = NULL;
+ bus_get_reg(&node_dev->clk[ctx], bus_dev);
+ MSM_BUS_DBG("%s: Valid node clk node %d ctx %d\n",
+ __func__, node_dev->node_info->id, ctx);
+ }
+ }
+
+ if (!IS_ERR_OR_NULL(pdata->bus_qos_clk.clk)) {
+ node_dev->bus_qos_clk.clk = pdata->bus_qos_clk.clk;
+ node_dev->bus_qos_clk.enable_only_clk =
+ pdata->bus_qos_clk.enable_only_clk;
+ node_dev->bus_qos_clk.setrate_only_clk =
+ pdata->bus_qos_clk.setrate_only_clk;
+ node_dev->bus_qos_clk.enable = false;
+ strlcpy(node_dev->bus_qos_clk.reg_name,
+ pdata->bus_qos_clk.reg_name, MAX_REG_NAME);
+ node_dev->bus_qos_clk.reg = NULL;
+ MSM_BUS_DBG("%s: Valid bus qos clk node %d\n", __func__,
+ node_dev->node_info->id);
+ }
+
+ if (pdata->num_node_qos_clks) {
+ node_dev->num_node_qos_clks = pdata->num_node_qos_clks;
+ node_dev->node_qos_clks = devm_kcalloc(bus_dev,
+ node_dev->num_node_qos_clks, sizeof(struct nodeclk),
+ GFP_KERNEL);
+ if (!node_dev->node_qos_clks)
+ return -ENOMEM;
+
+ for (i = 0; i < pdata->num_node_qos_clks; i++) {
+ node_dev->node_qos_clks[i].clk =
+ pdata->node_qos_clks[i].clk;
+ node_dev->node_qos_clks[i].enable_only_clk =
+ pdata->node_qos_clks[i].enable_only_clk;
+ node_dev->node_qos_clks[i].setrate_only_clk =
+ pdata->node_qos_clks[i].setrate_only_clk;
+ node_dev->node_qos_clks[i].enable = false;
+ strlcpy(node_dev->node_qos_clks[i].reg_name,
+ pdata->node_qos_clks[i].reg_name, MAX_REG_NAME);
+ node_dev->node_qos_clks[i].reg = NULL;
+ MSM_BUS_DBG("%s: Valid qos clk[%d] node %d %d Reg%s\n",
+ __func__, i,
+ node_dev->node_info->id,
+ node_dev->num_node_qos_clks,
+ node_dev->node_qos_clks[i].reg_name);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * msm_bus_copy_node_info() - populate a new bus device's node_info from
+ * the parsed platform data.
+ * @pdata:   source node description produced by DT/platform parsing.
+ * @bus_dev: destination device; all buffers below are devm-allocated
+ *           against it, so they are released automatically with the
+ *           device.
+ *
+ * Scalar fields are copied by value. Array fields (connections, black
+ * list, bcm/rsc device ids, qports, util levels) are duplicated into
+ * devm buffers so the caller may free @pdata afterwards.
+ *
+ * NOTE(review): ->name and ->disable_ports are shared by pointer, not
+ * copied — this assumes those allocations outlive pdata->info; verify
+ * against the DT parser's allocation lifetime.
+ *
+ * Returns 0 on success, -ENXIO on bad arguments, -ENOMEM on allocation
+ * failure. The explicit devm_kfree() calls on error paths just release
+ * earlier buffers eagerly; devres would reclaim them regardless.
+ */
+static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata,
+				struct device *bus_dev)
+{
+	int ret = 0, i = 0;
+	struct msm_bus_node_info_type *node_info = NULL;
+	struct msm_bus_node_info_type *pdata_node_info = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	bus_node = to_msm_bus_node(bus_dev);
+
+	if (!bus_node || !pdata) {
+		ret = -ENXIO;
+		MSM_BUS_ERR("%s: Invalid pointers pdata %p, bus_node %p",
+			__func__, pdata, bus_node);
+		goto exit_copy_node_info;
+	}
+
+	node_info = bus_node->node_info;
+	pdata_node_info = pdata->node_info;
+
+	node_info->name = pdata_node_info->name;
+	node_info->id =  pdata_node_info->id;
+	/* BCM request slots start at -1: "no slot assigned yet". */
+	node_info->bcm_req_idx = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_bcm_devs,
+			GFP_KERNEL);
+	if (!node_info->bcm_req_idx) {
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	for (i = 0; i < pdata_node_info->num_bcm_devs; i++)
+		node_info->bcm_req_idx[i] = -1;
+
+	/* Scalar / by-value copies. */
+	node_info->bus_device_id = pdata_node_info->bus_device_id;
+	node_info->mas_rpm_id = pdata_node_info->mas_rpm_id;
+	node_info->slv_rpm_id = pdata_node_info->slv_rpm_id;
+	node_info->num_connections = pdata_node_info->num_connections;
+	node_info->num_blist = pdata_node_info->num_blist;
+	node_info->num_bcm_devs = pdata_node_info->num_bcm_devs;
+	node_info->num_rsc_devs = pdata_node_info->num_rsc_devs;
+	node_info->num_qports = pdata_node_info->num_qports;
+	node_info->num_disable_ports = pdata_node_info->num_disable_ports;
+	node_info->disable_ports = pdata_node_info->disable_ports;
+	node_info->virt_dev = pdata_node_info->virt_dev;
+	node_info->is_fab_dev = pdata_node_info->is_fab_dev;
+	node_info->is_bcm_dev = pdata_node_info->is_bcm_dev;
+	node_info->is_rsc_dev = pdata_node_info->is_rsc_dev;
+	node_info->qos_params.prio_dflt = pdata_node_info->qos_params.prio_dflt;
+	node_info->qos_params.limiter.bw =
+			pdata_node_info->qos_params.limiter.bw;
+	node_info->qos_params.limiter.sat =
+			pdata_node_info->qos_params.limiter.sat;
+	node_info->qos_params.limiter_en =
+			pdata_node_info->qos_params.limiter_en;
+	node_info->qos_params.reg.low_prio =
+			pdata_node_info->qos_params.reg.low_prio;
+	node_info->qos_params.reg.hi_prio =
+			pdata_node_info->qos_params.reg.hi_prio;
+	node_info->qos_params.reg.bw =
+			pdata_node_info->qos_params.reg.bw;
+	node_info->qos_params.reg.sat =
+			pdata_node_info->qos_params.reg.sat;
+	node_info->qos_params.reg_mode.read =
+			pdata_node_info->qos_params.reg_mode.read;
+	node_info->qos_params.reg_mode.write =
+			pdata_node_info->qos_params.reg_mode.write;
+	node_info->qos_params.urg_fwd_en =
+			pdata_node_info->qos_params.urg_fwd_en;
+	node_info->qos_params.defer_init_qos =
+			pdata_node_info->qos_params.defer_init_qos;
+	node_info->agg_params.buswidth = pdata_node_info->agg_params.buswidth;
+	node_info->agg_params.agg_scheme =
+			pdata_node_info->agg_params.agg_scheme;
+	node_info->agg_params.vrail_comp =
+			pdata_node_info->agg_params.vrail_comp;
+	node_info->agg_params.num_aggports =
+			pdata_node_info->agg_params.num_aggports;
+	node_info->agg_params.num_util_levels =
+			pdata_node_info->agg_params.num_util_levels;
+	/* Deep-copy the utilization level table. */
+	node_info->agg_params.util_levels = devm_kzalloc(bus_dev,
+			sizeof(struct node_util_levels_type) *
+			node_info->agg_params.num_util_levels,
+			GFP_KERNEL);
+	if (!node_info->agg_params.util_levels) {
+		MSM_BUS_ERR("%s: Agg util level alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+	memcpy(node_info->agg_params.util_levels,
+		pdata_node_info->agg_params.util_levels,
+		sizeof(struct node_util_levels_type) *
+		pdata_node_info->agg_params.num_util_levels);
+
+	/*
+	 * dev_connections holds struct device pointers resolved later in
+	 * msm_bus_setup_dev_conn(); connections holds the raw node ids.
+	 */
+	node_info->dev_connections = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_connections,
+			GFP_KERNEL);
+	if (!node_info->dev_connections) {
+		MSM_BUS_ERR("%s:Bus dev connections alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->connections = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_connections,
+			GFP_KERNEL);
+	if (!node_info->connections) {
+		MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->connections,
+		pdata_node_info->connections,
+		sizeof(int) * pdata_node_info->num_connections);
+
+	/* Same id/pointer pairing for the blacklist (bl_cons). */
+	node_info->black_connections = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_blist,
+			GFP_KERNEL);
+	if (!node_info->black_connections) {
+		MSM_BUS_ERR("%s: Bus black connections alloc failed\n",
+			__func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->bl_cons = devm_kzalloc(bus_dev,
+			pdata_node_info->num_blist * sizeof(int),
+			GFP_KERNEL);
+	if (!node_info->bl_cons) {
+		MSM_BUS_ERR("%s:Bus black list connections alloc failed\n",
+			__func__);
+		devm_kfree(bus_dev, node_info->black_connections);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->bl_cons,
+		pdata_node_info->bl_cons,
+		sizeof(int) * pdata_node_info->num_blist);
+
+	/* BCM device pointers (resolved later) and their ids. */
+	node_info->bcm_devs = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_bcm_devs,
+			GFP_KERNEL);
+	if (!node_info->bcm_devs) {
+		MSM_BUS_ERR("%s:Bcm dev connections alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->bcm_dev_ids = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_bcm_devs,
+			GFP_KERNEL);
+	if (!node_info->bcm_dev_ids) {
+		MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__);
+		devm_kfree(bus_dev, node_info->bcm_devs);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->bcm_dev_ids,
+		pdata_node_info->bcm_dev_ids,
+		sizeof(int) * pdata_node_info->num_bcm_devs);
+
+	/* RSC device pointers (resolved later) and their ids. */
+	node_info->rsc_devs = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_rsc_devs,
+			GFP_KERNEL);
+	if (!node_info->rsc_devs) {
+		MSM_BUS_ERR("%s:rsc dev connections alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->rsc_dev_ids = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_rsc_devs,
+			GFP_KERNEL);
+	if (!node_info->rsc_dev_ids) {
+		MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__);
+		devm_kfree(bus_dev, node_info->rsc_devs);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->rsc_dev_ids,
+		pdata_node_info->rsc_dev_ids,
+		sizeof(int) * pdata_node_info->num_rsc_devs);
+
+	/* QoS port numbers for this node. */
+	node_info->qport = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_qports,
+			GFP_KERNEL);
+	if (!node_info->qport) {
+		MSM_BUS_ERR("%s:Bus qport allocation failed\n", __func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		devm_kfree(bus_dev, node_info->bl_cons);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->qport,
+		pdata_node_info->qport,
+		sizeof(int) * pdata_node_info->num_qports);
+
+exit_copy_node_info:
+	return ret;
+}
+
+/*
+ * msm_bus_device_init() - allocate and register one bus node device
+ * from its parsed platform data.
+ * @pdata: parsed node description (ids, QoS BCM vectors, regs, of_node).
+ *
+ * Allocates the node, initializes the embedded struct device (so devm
+ * allocations can be attached), copies node/QoS data, then registers
+ * the device on msm_bus_type.
+ *
+ * Returns the new device on success or an ERR_PTR() on failure. On the
+ * error path put_device() drops the initialized device reference before
+ * the backing node is freed.
+ */
+static struct device *msm_bus_device_init(
+			struct msm_bus_node_device_type *pdata)
+{
+	struct device *bus_dev = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+	struct msm_bus_node_info_type *node_info = NULL;
+	int ret = -ENODEV, i = 0;
+
+	/*
+	 * Init here so we can use devm calls
+	 */
+
+	bus_node = kzalloc(sizeof(struct msm_bus_node_device_type), GFP_KERNEL);
+	if (!bus_node) {
+		ret = -ENOMEM;
+		goto err_device_init;
+	}
+	bus_dev = &bus_node->dev;
+	device_initialize(bus_dev);
+
+	node_info = devm_kzalloc(bus_dev,
+			sizeof(struct msm_bus_node_info_type), GFP_KERNEL);
+	if (!node_info) {
+		ret = -ENOMEM;
+		goto err_put_device;
+	}
+
+	bus_node->node_info = node_info;
+	bus_node->ap_owned = pdata->ap_owned;
+	bus_node->dirty = false;
+	bus_node->num_qos_bcms = pdata->num_qos_bcms;
+	if (bus_node->num_qos_bcms) {
+		bus_node->qos_bcms = devm_kzalloc(bus_dev,
+					(sizeof(struct qos_bcm_type) *
+					bus_node->num_qos_bcms), GFP_KERNEL);
+		if (!bus_node->qos_bcms) {
+			ret = -ENOMEM;
+			goto err_put_device;
+		}
+		for (i = 0; i < bus_node->num_qos_bcms; i++) {
+			bus_node->qos_bcms[i].qos_bcm_id =
+					pdata->qos_bcms[i].qos_bcm_id;
+			bus_node->qos_bcms[i].vec.vec_a =
+					pdata->qos_bcms[i].vec.vec_a;
+			bus_node->qos_bcms[i].vec.vec_b =
+					pdata->qos_bcms[i].vec.vec_b;
+		}
+	}
+	bus_node->num_regs = pdata->num_regs;
+	if (bus_node->num_regs)
+		bus_node->node_regs = pdata->node_regs;
+
+	bus_dev->of_node = pdata->of_node;
+
+	ret = msm_bus_copy_node_info(pdata, bus_dev);
+	if (ret)
+		goto err_put_device;
+
+	bus_dev->bus = &msm_bus_type;
+	/*
+	 * dev_set_name() takes a printf-style format string; pass the node
+	 * name as an argument rather than as the format, so a '%' in the
+	 * name cannot corrupt the device name (format-string defect).
+	 */
+	dev_set_name(bus_dev, "%s", bus_node->node_info->name);
+
+	ret = device_add(bus_dev);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error registering device %d",
+				__func__, pdata->node_info->id);
+		goto err_put_device;
+	}
+	device_create_file(bus_dev, &dev_attr_bw);
+	INIT_LIST_HEAD(&bus_node->devlist);
+	return bus_dev;
+
+err_put_device:
+	put_device(bus_dev);
+	bus_dev = NULL;
+	kfree(bus_node);
+err_device_init:
+	return ERR_PTR(ret);
+}
+
+/*
+ * msm_bus_setup_dev_conn() - resolve a node's id-based links into
+ * struct device pointers.
+ *
+ * Run via bus_for_each_dev() once every node has been registered, so
+ * every id lookup below can succeed. For an ordinary node (not fab,
+ * bcm, or rsc) the parent fabric is found and the node is added to the
+ * fabric's devlist. Then connections, blacklist entries, BCM devices
+ * and RSC devices are resolved from their ids; each resolved BCM/RSC
+ * has its usage counter bumped so later allocations can be sized.
+ *
+ * Returns 0 on success, -ENODEV/-ENXIO if any referenced node cannot
+ * be found.
+ */
+static int msm_bus_setup_dev_conn(struct device *bus_dev, void *data)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	struct msm_bus_node_device_type *bcm_node = NULL;
+	struct msm_bus_node_device_type *rsc_node = NULL;
+	int ret = 0;
+	int j;
+	struct msm_bus_node_device_type *fab;
+
+	bus_node = to_msm_bus_node(bus_dev);
+	if (!bus_node) {
+		MSM_BUS_ERR("%s: Can't get device info", __func__);
+		ret = -ENODEV;
+		goto exit_setup_dev_conn;
+	}
+
+	/* Setup parent bus device for this node */
+	if (!bus_node->node_info->is_fab_dev &&
+		!bus_node->node_info->is_bcm_dev &&
+		!bus_node->node_info->is_rsc_dev) {
+		struct device *bus_parent_device =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->bus_device_id,
+				msm_bus_device_match_adhoc);
+
+		if (!bus_parent_device) {
+			MSM_BUS_ERR("%s: Error finding parentdev %d parent %d",
+				__func__,
+				bus_node->node_info->id,
+				bus_node->node_info->bus_device_id);
+			ret = -ENXIO;
+			goto exit_setup_dev_conn;
+		}
+		bus_node->node_info->bus_device = bus_parent_device;
+		fab = to_msm_bus_node(bus_parent_device);
+		list_add_tail(&bus_node->dev_link, &fab->devlist);
+	}
+
+	bus_node->node_info->is_traversed = false;
+
+	/* Resolve forward connections (ids -> device pointers). */
+	for (j = 0; j < bus_node->node_info->num_connections; j++) {
+		bus_node->node_info->dev_connections[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->connections[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->dev_connections[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d",
+				__func__, bus_node->node_info->connections[j],
+				 bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+	}
+
+	/* Resolve blacklist connections. */
+	for (j = 0; j < bus_node->node_info->num_blist; j++) {
+		bus_node->node_info->black_connections[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->bl_cons[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->black_connections[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d\n",
+				__func__, bus_node->node_info->bl_cons[j],
+				bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+	}
+
+	/*
+	 * Resolve BCM devices; each BCM counts how many bus nodes
+	 * reference it (num_bus_devs).
+	 */
+	for (j = 0; j < bus_node->node_info->num_bcm_devs; j++) {
+		bus_node->node_info->bcm_devs[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->bcm_dev_ids[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->bcm_devs[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d",
+				__func__, bus_node->node_info->bcm_dev_ids[j],
+				 bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+		bcm_node = to_msm_bus_node(bus_node->node_info->bcm_devs[j]);
+		bcm_node->bcmdev->num_bus_devs++;
+	}
+
+	/*
+	 * Resolve RSC devices; each RSC counts referencing BCMs
+	 * (num_bcm_devs), used to size its TCS command lists later.
+	 */
+	for (j = 0; j < bus_node->node_info->num_rsc_devs; j++) {
+		bus_node->node_info->rsc_devs[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->rsc_dev_ids[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->rsc_devs[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d",
+				__func__, bus_node->node_info->rsc_dev_ids[j],
+				 bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+		rsc_node = to_msm_bus_node(bus_node->node_info->rsc_devs[j]);
+		rsc_node->rscdev->num_bcm_devs++;
+	}
+
+exit_setup_dev_conn:
+	return ret;
+}
+
+/*
+ * msm_bus_node_debug() - per-device debug dump, run via
+ * bus_for_each_dev() at the end of probe.
+ *
+ * Logs the node id, bus width and each resolved connection; for fabric
+ * devices it additionally hooks up the floor-vote debug interface via
+ * msm_bus_floor_init().
+ *
+ * Returns 0, or -ENODEV if the node lookup fails.
+ */
+static int msm_bus_node_debug(struct device *bus_dev, void *data)
+{
+	struct msm_bus_node_device_type *node;
+	int idx;
+
+	node = to_msm_bus_node(bus_dev);
+	if (!node) {
+		MSM_BUS_ERR("%s: Can't get device info", __func__);
+		return -ENODEV;
+	}
+
+	MSM_BUS_DBG("Device = %d buswidth %u", node->node_info->id,
+			node->node_info->agg_params.buswidth);
+
+	for (idx = 0; idx < node->node_info->num_connections; idx++) {
+		struct msm_bus_node_device_type *peer =
+			to_msm_bus_node(
+				node->node_info->dev_connections[idx]);
+
+		MSM_BUS_DBG("\n\t Connection[%d] %d", idx,
+				peer->node_info->id);
+	}
+
+	if (node->node_info->is_fab_dev)
+		msm_bus_floor_init(bus_dev);
+
+	return 0;
+}
+
+/*
+ * msm_bus_free_dev() - unregister one bus node device and free its
+ * backing allocation (kzalloc'd in msm_bus_device_init()).
+ *
+ * Used as the bus_for_each_dev() callback from msm_bus_device_remove().
+ * Always returns 0 so iteration continues over every device.
+ */
+static int msm_bus_free_dev(struct device *dev, void *data)
+{
+	struct msm_bus_node_device_type *node = to_msm_bus_node(dev);
+
+	if (node)
+		MSM_BUS_ERR("\n%s: Removing device %d", __func__,
+				node->node_info->id);
+
+	device_unregister(dev);
+	kfree(node);
+	return 0;
+}
+
+/*
+ * msm_bus_device_remove() - platform_driver remove hook; tears down
+ * every node registered on msm_bus_type via msm_bus_free_dev().
+ */
+int msm_bus_device_remove(struct platform_device *pdev)
+{
+	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_free_dev);
+	return 0;
+}
+
+/*
+ * msm_bus_device_probe() - top-level probe for the msm-bus fabric.
+ *
+ * Parses node data (from DT or platform_data), creates a device per
+ * node, initializes clocks and the fab/bcm/rsc specializations, then
+ * resolves inter-node connections and performs post-connection setup.
+ * QoS programming failures are deliberately ignored (deferred to the
+ * first transaction).
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int msm_bus_device_probe(struct platform_device *pdev)
+{
+	unsigned int i;
+	/*
+	 * ret must be signed: it carries negative errnos and PTR_ERR()
+	 * values (it was previously declared unsigned int).
+	 */
+	int ret = 0;
+	struct msm_bus_device_node_registration *pdata;
+
+	MSM_BUS_ERR("msm_bus: Probe started");
+	/* If possible, get pdata from device-tree */
+	if (pdev->dev.of_node)
+		pdata = msm_bus_of_to_pdata(pdev);
+	else {
+		pdata = (struct msm_bus_device_node_registration *)
+			pdev->dev.platform_data;
+	}
+
+	MSM_BUS_ERR("msm_bus: DT Parsing complete");
+
+	if (IS_ERR_OR_NULL(pdata)) {
+		MSM_BUS_ERR("No platform data found");
+		ret = -ENODATA;
+		goto exit_device_probe;
+	}
+
+	for (i = 0; i < pdata->num_devices; i++) {
+		struct device *node_dev = NULL;
+
+		node_dev = msm_bus_device_init(&pdata->info[i]);
+
+		if (IS_ERR(node_dev)) {
+			MSM_BUS_ERR("%s: Error during dev init for %d",
+				__func__, pdata->info[i].node_info->id);
+			ret = PTR_ERR(node_dev);
+			goto exit_device_probe;
+		}
+
+		ret = msm_bus_init_clk(node_dev, &pdata->info[i]);
+		if (ret) {
+			MSM_BUS_ERR("\n Failed to init bus clk. ret %d", ret);
+			msm_bus_device_remove(pdev);
+			goto exit_device_probe;
+		}
+		/*Is this a fabric device ?*/
+		if (pdata->info[i].node_info->is_fab_dev) {
+			MSM_BUS_DBG("%s: %d is a fab", __func__,
+						pdata->info[i].node_info->id);
+			ret = msm_bus_fabric_init(node_dev, &pdata->info[i]);
+			if (ret) {
+				MSM_BUS_ERR("%s: Error intializing fab %d",
+					__func__, pdata->info[i].node_info->id);
+				goto exit_device_probe;
+			}
+		}
+		if (pdata->info[i].node_info->is_bcm_dev) {
+			ret = msm_bus_bcm_init(node_dev, &pdata->info[i]);
+			if (ret) {
+				MSM_BUS_ERR("%s: Error intializing bcm %d",
+					__func__, pdata->info[i].node_info->id);
+				goto exit_device_probe;
+			}
+		}
+		if (pdata->info[i].node_info->is_rsc_dev) {
+			ret = msm_bus_rsc_init(pdev, node_dev, &pdata->info[i]);
+			if (ret) {
+				MSM_BUS_ERR("%s: Error intializing rsc %d",
+					__func__, pdata->info[i].node_info->id);
+				goto exit_device_probe;
+			}
+		}
+	}
+
+	ret = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+						msm_bus_setup_dev_conn);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error setting up dev connections", __func__);
+		goto exit_device_probe;
+	}
+
+	ret = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+						msm_bus_postcon_setup);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error post connection setup", __func__);
+		goto exit_device_probe;
+	}
+
+	/*
+	 * Setup the QoS for the nodes. The return value is intentionally
+	 * discarded: QoS programming is deferred to the first transaction
+	 * in cases of failure and we want the probe to succeed. (It was
+	 * previously assigned to ret and returned, failing the probe.)
+	 */
+	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_dev_init_qos);
+
+	/* Register the arb layer ops */
+	msm_bus_arb_setops_adhoc(&arb_ops);
+	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_node_debug);
+
+	devm_kfree(&pdev->dev, pdata->info);
+	devm_kfree(&pdev->dev, pdata);
+exit_device_probe:
+	return ret;
+}
+
+/*
+ * msm_bus_device_rules_probe() - probe for the static bandwidth rules
+ * device.
+ *
+ * Reads the static rule table from DT, registers it with the rules
+ * engine and caches it in the module-level static_rules so remove()
+ * can unregister it. A missing table is not an error.
+ */
+static int msm_bus_device_rules_probe(struct platform_device *pdev)
+{
+	struct bus_rule_type *rules = NULL;
+	int count;
+
+	count = msm_bus_of_get_static_rules(pdev, &rules);
+	if (!rules)
+		return 0;
+
+	msm_rule_register(count, rules, NULL);
+	static_rules.num_rules = count;
+	static_rules.rules = rules;
+	pdev->dev.platform_data = &static_rules;
+
+	return 0;
+}
+
+/*
+ * msm_bus_device_rules_remove() - unregister the static rule table
+ * cached by msm_bus_device_rules_probe(), if any was registered.
+ */
+int msm_bus_device_rules_remove(struct platform_device *pdev)
+{
+	struct static_rules_type *rules = pdev->dev.platform_data;
+
+	if (rules)
+		msm_rule_unregister(rules->num_rules, rules->rules, NULL);
+
+	return 0;
+}
+
+/*
+ * msm_bus_rsc_probe() - record an RSC device in the rsc_clients table.
+ *
+ * Reads the RSC's bus id from DT and claims the first free slot in
+ * rsc_clients[] (the last slot is never used, matching
+ * msm_bus_rsc_remove()).
+ *
+ * Returns 0 on success or the of_property_read_u32() error.
+ */
+static int msm_bus_rsc_probe(struct platform_device *pdev)
+{
+	int i = 0;
+	int ret = 0;
+	uint32_t rsc_id = 0;
+
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,msm-bus-id",
+								&rsc_id);
+	if (ret) {
+		MSM_BUS_ERR("unable to find msm bus id\n");
+		return ret;
+	}
+
+	/*
+	 * Claim exactly one free slot. The break is required: without it
+	 * the same rsc_id was copied into every remaining empty slot.
+	 * (&pdev->dev can never be NULL or an ERR_PTR, so the old
+	 * IS_ERR_OR_NULL rollback check was dead code and is removed.)
+	 */
+	for (i = 0; i < MSM_BUS_RSC_COUNT - 1; i++) {
+		if (!rsc_clients[i].rsc_id) {
+			rsc_clients[i].rsc_id = rsc_id;
+			rsc_clients[i].client = &pdev->dev;
+			break;
+		}
+	}
+	return 0;
+}
+
+/*
+ * msm_bus_rsc_remove() - clear every rsc_clients[] slot used by
+ * msm_bus_rsc_probe() (the last slot is never touched, mirroring the
+ * probe-side loop bound).
+ */
+int msm_bus_rsc_remove(struct platform_device *pdev)
+{
+	int idx;
+
+	for (idx = 0; idx < MSM_BUS_RSC_COUNT - 1; idx++) {
+		rsc_clients[idx].rsc_id = 0;
+		rsc_clients[idx].client = NULL;
+	}
+
+	return 0;
+}
+
+/* DT match + driver for the static bandwidth rules device. */
+static const struct of_device_id rules_match[] = {
+	{.compatible = "qcom,msm-bus-static-bw-rules"},
+	{}
+};
+
+static struct platform_driver msm_bus_rules_driver = {
+	.probe = msm_bus_device_rules_probe,
+	.remove = msm_bus_device_rules_remove,
+	.driver = {
+		.name = "msm_bus_rules_device",
+		.of_match_table = rules_match,
+	},
+};
+
+/* DT match + driver for the main bus fabric device. */
+static const struct of_device_id fabric_match[] = {
+	{.compatible = "qcom,msm-bus-device"},
+	{}
+};
+
+static struct platform_driver msm_bus_device_driver = {
+	.probe = msm_bus_device_probe,
+	.remove = msm_bus_device_remove,
+	.driver = {
+		.name = "msm_bus_device",
+		.of_match_table = fabric_match,
+	},
+};
+
+/* DT match + driver for RSC devices (registered at core_initcall). */
+static const struct of_device_id rsc_match[] = {
+	{.compatible = "qcom,msm-bus-rsc"},
+	{}
+};
+
+static struct platform_driver msm_bus_rsc_driver = {
+	.probe = msm_bus_rsc_probe,
+	.remove = msm_bus_rsc_remove,
+	.driver = {
+		.name = "msm_bus_rsc",
+		.of_match_table = rsc_match,
+	},
+};
+
+/*
+ * msm_bus_rsc_init_driver() - core_initcall entry; registers the RSC
+ * platform driver ahead of the main bus driver.
+ */
+int __init msm_bus_rsc_init_driver(void)
+{
+	int ret = platform_driver_register(&msm_bus_rsc_driver);
+
+	if (ret)
+		MSM_BUS_ERR("Failed to register msm bus rsc device driver");
+
+	return ret;
+}
+
+
+/*
+ * msm_bus_device_init_driver() - subsys_initcall entry; registers the
+ * main bus device driver and then the static-rules driver.
+ */
+int __init msm_bus_device_init_driver(void)
+{
+	int ret;
+
+	MSM_BUS_ERR("msm_bus_fabric_rpmh_init_driver\n");
+
+	ret = platform_driver_register(&msm_bus_device_driver);
+	if (ret) {
+		MSM_BUS_ERR("Failed to register bus device driver");
+		return ret;
+	}
+
+	return platform_driver_register(&msm_bus_rules_driver);
+}
+
+/*
+ * msm_bus_device_late_init() - late_initcall_sync entry; commits the
+ * deferred init-time data, drops the boot-time handoff bandwidth
+ * requests, and marks init time over so later commits take the normal
+ * path.
+ */
+int __init msm_bus_device_late_init(void)
+{
+	commit_late_init_data(true);
+	MSM_BUS_ERR("msm_bus_late_init: Remove handoff bw requests\n");
+	init_time = false;
+	return commit_late_init_data(false);
+}
+core_initcall(msm_bus_rsc_init_driver);
+subsys_initcall(msm_bus_device_init_driver);
+late_initcall_sync(msm_bus_device_late_init);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc.h b/drivers/soc/qcom/msm_bus/msm_bus_noc.h
new file mode 100644
index 0000000..a93f6ff
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_noc.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * Guard renamed from _ARCH_ARM_MACH_MSM_BUS_BIMC_H: that name collides
+ * with the BIMC header's guard, so whichever of the two headers was
+ * included second would be silently skipped. The closing #endif comment
+ * already used the NOC name.
+ */
+#ifndef _ARCH_ARM_MACH_MSM_BUS_NOC_H
+#define _ARCH_ARM_MACH_MSM_BUS_NOC_H
+
+enum msm_bus_noc_qos_mode_type {
+	NOC_QOS_MODE_FIXED = 0,
+	NOC_QOS_MODE_LIMITER,
+	NOC_QOS_MODE_BYPASS,
+	NOC_QOS_MODE_REGULATOR,
+	NOC_QOS_MODE_MAX,
+};
+
+/* Bitmask form of the modes above, used for permission checks. */
+enum msm_bus_noc_qos_mode_perm {
+	NOC_QOS_PERM_MODE_FIXED = (1 << NOC_QOS_MODE_FIXED),
+	NOC_QOS_PERM_MODE_LIMITER = (1 << NOC_QOS_MODE_LIMITER),
+	NOC_QOS_PERM_MODE_BYPASS = (1 << NOC_QOS_MODE_BYPASS),
+	NOC_QOS_PERM_MODE_REGULATOR = (1 << NOC_QOS_MODE_REGULATOR),
+};
+
+#define NOC_QOS_MODES_ALL_PERM (NOC_QOS_PERM_MODE_FIXED | \
+	NOC_QOS_PERM_MODE_LIMITER | NOC_QOS_PERM_MODE_BYPASS | \
+	NOC_QOS_PERM_MODE_REGULATOR)
+
+struct msm_bus_noc_commit {
+	struct msm_bus_node_hw_info *mas;
+	struct msm_bus_node_hw_info *slv;
+};
+
+struct msm_bus_noc_info {
+	void __iomem *base;
+	uint32_t base_addr;
+	uint32_t nmasters;
+	uint32_t nqos_masters;
+	uint32_t nslaves;
+	uint32_t qos_freq; /* QOS Clock in KHz */
+	uint32_t qos_baseoffset;
+	uint32_t qos_delta;
+	uint32_t *mas_modes;
+	uint32_t sbm_offset;
+	struct msm_bus_noc_commit cdata[NUM_CTX];
+};
+
+struct msm_bus_noc_qos_priority {
+	uint32_t high_prio;
+	uint32_t low_prio;
+	uint32_t read_prio;
+	uint32_t write_prio;
+	uint32_t p1;
+	uint32_t p0;
+};
+
+struct msm_bus_noc_qos_bw {
+	uint64_t bw; /* Bandwidth in bytes per second */
+	uint32_t ws; /* Window size in nano seconds */
+};
+
+void msm_bus_noc_init(struct msm_bus_noc_info *ninfo);
+uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode);
+void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta,
+	struct msm_bus_noc_qos_priority *qprio);
+void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off,
+	uint32_t qos_freq, uint32_t mport, uint32_t qos_delta,
+	uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw);
+#endif /*_ARCH_ARM_MACH_MSM_BUS_NOC_H */
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c
new file mode 100644
index 0000000..93d7642
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c
@@ -0,0 +1,583 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: NOC: %s(): " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_adhoc.h"
+
+/* NOC_QOS generic */
+#define __CLZ(x) ((8 * sizeof(uint32_t)) - 1 - __fls(x))
+#define SAT_SCALE 16 /* 16 bytes minimum for saturation */
+#define BW_SCALE 256 /* 1/256 byte per cycle unit */
+#define QOS_DEFAULT_BASEOFFSET 0x00003000
+#define QOS_DEFAULT_DELTA 0x80
+/* Widest values representable in the BW / SAT register fields. */
+#define MAX_BW_FIELD (NOC_QOS_BWn_BW_BMSK >> NOC_QOS_BWn_BW_SHFT)
+#define MAX_SAT_FIELD (NOC_QOS_SATn_SAT_BMSK >> NOC_QOS_SATn_SAT_SHFT)
+#define MIN_SAT_FIELD 1
+#define MIN_BW_FIELD 1
+
+#define NOC_QOS_REG_BASE(b, o) ((b) + (o))
+
+/*
+ * Per-master-port QoS register bank: port n's registers sit at a fixed
+ * offset from the QoS base, spaced by the per-port delta d.
+ */
+#define NOC_QOS_PRIORITYn_ADDR(b, o, n, d)	\
+	(NOC_QOS_REG_BASE(b, o) + 0x8 + (d) * (n))
+enum noc_qos_id_priorityn {
+	NOC_QOS_PRIORITYn_RMSK		= 0x0000000f,
+	NOC_QOS_PRIORITYn_MAXn		= 32,
+	NOC_QOS_PRIORITYn_P1_BMSK	= 0xc,
+	NOC_QOS_PRIORITYn_P1_SHFT	= 0x2,
+	NOC_QOS_PRIORITYn_P0_BMSK	= 0x3,
+	NOC_QOS_PRIORITYn_P0_SHFT	= 0x0,
+};
+
+#define NOC_QOS_MODEn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0xC + (d) * (n))
+enum noc_qos_id_moden_rmsk {
+	NOC_QOS_MODEn_RMSK		= 0x00000003,
+	NOC_QOS_MODEn_MAXn		= 32,
+	NOC_QOS_MODEn_MODE_BMSK		= 0x3,
+	NOC_QOS_MODEn_MODE_SHFT		= 0x0,
+};
+
+#define NOC_QOS_BWn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0x10 + (d) * (n))
+enum noc_qos_id_bwn {
+	NOC_QOS_BWn_RMSK		= 0x0000ffff,
+	NOC_QOS_BWn_MAXn		= 32,
+	NOC_QOS_BWn_BW_BMSK		= 0xffff,
+	NOC_QOS_BWn_BW_SHFT		= 0x0,
+};
+
+/* QOS Saturation registers */
+#define NOC_QOS_SATn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0x14 + (d) * (n))
+enum noc_qos_id_saturationn {
+	NOC_QOS_SATn_RMSK		= 0x000003ff,
+	NOC_QOS_SATn_MAXn		= 32,
+	NOC_QOS_SATn_SAT_BMSK		= 0x3ff,
+	NOC_QOS_SATn_SAT_SHFT		= 0x0,
+};
+
+/*
+ * noc_div() - divide *a by b in place.
+ *
+ * Values strictly between 0 and b round down to 0 and report a
+ * remainder of 1 so callers can detect the truncation; otherwise
+ * returns do_div()'s remainder.
+ */
+static int noc_div(uint64_t *a, uint32_t b)
+{
+	uint64_t dividend = *a;
+
+	if (dividend > 0 && dividend < b) {
+		*a = 0;
+		return 1;
+	}
+
+	return do_div(*a, b);
+}
+
+/**
+ * noc_bw() - convert a BW register field value back to bytes/second.
+ * @bw_field: value read from the NOC_QOS_BWn register field.
+ * @qos_freq: QoS timebase clock in KHz.
+ *
+ * The product is formed with a 64-bit multiply (2ULL): the original
+ * 32-bit expression 2 * qos_freq * bw_field could wrap for large
+ * timebase frequencies before being widened to uint64_t.
+ */
+static uint64_t noc_bw(uint32_t bw_field, uint32_t qos_freq)
+{
+	uint64_t res;
+	uint32_t scale;
+
+	res = 2ULL * qos_freq * bw_field;
+	scale = BW_SCALE * 1000;
+	noc_div(&res, scale);
+	MSM_BUS_DBG("NOC: Calculated bw: %llu\n", res * 1000000ULL);
+	return res * 1000000ULL;
+}
+
+/**
+ * noc_bw_ceil() - max BW in bytes/sec representable by @bw_field at the
+ * given time-base.
+ * @bw_field:     BW register field value (typically MAX_BW_FIELD).
+ * @qos_freq_khz: QoS timebase clock in KHz.
+ *
+ * The product is formed with a 64-bit multiply (2ULL): since @bw_field
+ * is int, the original 2 * qos_freq_khz * bw_field was evaluated in
+ * 32-bit arithmetic and could overflow (undefined behavior for the
+ * signed operand) before the widening assignment.
+ */
+static uint32_t noc_bw_ceil(int bw_field, uint32_t qos_freq_khz)
+{
+	uint64_t bw_temp = 2ULL * qos_freq_khz * bw_field;
+	uint32_t scale = 1000 * BW_SCALE;
+
+	noc_div(&bw_temp, scale);
+	return bw_temp * 1000000;
+}
+#define MAX_BW(timebase) noc_bw_ceil(MAX_BW_FIELD, (timebase))
+
+/**
+ * Calculates ws hardware is using from register values
+ * ws returned is in nanoseconds
+ */
+static uint32_t noc_ws(uint64_t bw, uint32_t sat, uint32_t qos_freq)
+{
+	if (bw && qos_freq) {
+		/*
+		 * NOTE(review): bw * qos_freq is truncated to 32 bits
+		 * here; for large bw (bytes/sec) this wraps before the
+		 * division below — confirm the intended operating range.
+		 */
+		uint32_t bwf = bw * qos_freq;
+		uint64_t scale = 1000000000000LL * BW_SCALE *
+			SAT_SCALE * sat;
+		noc_div(&scale, bwf);
+		MSM_BUS_DBG("NOC: Calculated ws: %llu\n", scale);
+		/* Implicit narrowing of the 64-bit quotient to uint32_t. */
+		return scale;
+	}
+
+	return 0;
+}
+#define MAX_WS(bw, timebase) noc_ws((bw), MAX_SAT_FIELD, (timebase))
+
+/* Calculate bandwidth field value for requested bandwidth */
+static uint32_t noc_bw_field(uint64_t bw_bps, uint32_t qos_freq_khz)
+{
+	uint32_t bw_field = 0;
+
+	if (bw_bps) {
+		uint32_t rem;
+		/* Cap the request to what the register field can encode. */
+		uint64_t bw_capped = min_t(uint64_t, bw_bps,
+						MAX_BW(qos_freq_khz));
+		uint64_t bwc = bw_capped * BW_SCALE;
+		uint64_t qf = 2 * qos_freq_khz * 1000;
+
+		rem = noc_div(&bwc, qf);
+		/* Clamp the encoded field to [MIN_BW_FIELD, MAX_BW_FIELD]. */
+		bw_field = (uint32_t)max_t(unsigned long, bwc, MIN_BW_FIELD);
+		bw_field = (uint32_t)min_t(unsigned long, bw_field,
+								MAX_BW_FIELD);
+	}
+
+	MSM_BUS_DBG("NOC: bw_field: %u\n", bw_field);
+	return bw_field;
+}
+
+/*
+ * noc_sat_field() - compute the saturation register field for the
+ * requested bandwidth @bw (bytes/sec) and window size @ws (ns) at
+ * timebase @qos_freq (KHz). Returns 0 when no bandwidth is requested.
+ */
+static uint32_t noc_sat_field(uint64_t bw, uint32_t ws, uint32_t qos_freq)
+{
+	uint32_t sat_field = 0;
+
+	if (bw) {
+		/* Limit to max bw and scale bw to 100 KB increments */
+		uint64_t tbw, tscale;
+		uint64_t bw_scaled = min_t(uint64_t, bw, MAX_BW(qos_freq));
+		uint32_t rem = noc_div(&bw_scaled, 100000);
+
+		/*
+		 * SATURATION =
+		 * (BW [MBps] * integration window [us] *
+		 *  time base frequency [MHz]) / (256 * 16)
+		 */
+		tbw = bw_scaled * ws * qos_freq;
+		tscale = BW_SCALE * SAT_SCALE * 1000000LL;
+		rem = noc_div(&tbw, tscale);
+		/* Clamp the field to [MIN_SAT_FIELD, MAX_SAT_FIELD]. */
+		sat_field = (uint32_t)max_t(unsigned long, tbw, MIN_SAT_FIELD);
+		sat_field = (uint32_t)min_t(unsigned long, sat_field,
+							MAX_SAT_FIELD);
+	}
+
+	MSM_BUS_DBG("NOC: sat_field: %d\n", sat_field);
+	return sat_field;
+}
+
+/*
+ * noc_set_qos_mode() - program the QoS mode for master port @mport via
+ * a read-modify-write of its MODEn register.
+ *
+ * The write is skipped unless @mode is valid and its bit is set in
+ * @perm_mode (the allowed-modes bitmask); the barrier runs either way.
+ */
+static void noc_set_qos_mode(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta, uint8_t mode,
+		uint8_t perm_mode)
+{
+	if (mode < NOC_QOS_MODE_MAX &&
+		((1 << mode) & perm_mode)) {
+		uint32_t reg_val;
+
+		reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta)) & NOC_QOS_MODEn_RMSK;
+		writel_relaxed(((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK))) |
+			(mode & NOC_QOS_MODEn_MODE_BMSK)),
+			NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta));
+	}
+	/* Ensure qos mode is set before exiting */
+	wmb();
+}
+
+/*
+ * noc_set_qos_priority() - program the P1 and P0 priority fields for
+ * master port @mport, each via its own read-modify-write of the
+ * PRIORITYn register (P1 first, then P0 re-read and updated).
+ */
+static void noc_set_qos_priority(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		struct msm_bus_noc_qos_priority *priority)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_PRIORITYn_RMSK;
+	val = priority->p1 << NOC_QOS_PRIORITYn_P1_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P1_BMSK))) |
+		(val & NOC_QOS_PRIORITYn_P1_BMSK)),
+		NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport,
+								qos_delta))
+		& NOC_QOS_PRIORITYn_RMSK;
+	writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P0_BMSK))) |
+		(priority->p0 & NOC_QOS_PRIORITYn_P0_BMSK)),
+		NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta));
+	/* Ensure qos priority is set before exiting */
+	wmb();
+}
+
+/*
+ * msm_bus_noc_set_qos_bw() - program the BW and SAT registers for
+ * master port @mport.
+ *
+ * Only applies when Limiter or Regulator mode is permitted (those are
+ * the modes that consume BW/SAT). If the port is currently in one of
+ * those modes it is dropped to FIXED first (clearing the QoS
+ * accumulator), the BW and SAT fields are rewritten, and the original
+ * mode is restored. No-op when @qos_freq is zero (fields are scaled by
+ * the timebase).
+ */
+static void msm_bus_noc_set_qos_bw(void __iomem *base, uint32_t qos_off,
+		uint32_t qos_freq, uint32_t mport, uint32_t qos_delta,
+		uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw)
+{
+	uint32_t reg_val, val, mode;
+
+	if (!qos_freq) {
+		MSM_BUS_DBG("Zero QoS Freq\n");
+		return;
+	}
+
+	/* If Limiter or Regulator modes are not supported, bw not available*/
+	if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
+		NOC_QOS_PERM_MODE_REGULATOR)) {
+		uint32_t bw_val = noc_bw_field(qbw->bw, qos_freq);
+		uint32_t sat_val = noc_sat_field(qbw->bw, qbw->ws,
+			qos_freq);
+
+		MSM_BUS_DBG("NOC: BW: perm_mode: %d bw_val: %d, sat_val: %d\n",
+			perm_mode, bw_val, sat_val);
+		/*
+		 * If in Limiter/Regulator mode, first go to fixed mode.
+		 * Clear QoS accumulator
+		 **/
+		mode = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK;
+		if (mode == NOC_QOS_MODE_REGULATOR || mode ==
+			NOC_QOS_MODE_LIMITER) {
+			reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(
+				base, qos_off, mport, qos_delta));
+			val = NOC_QOS_MODE_FIXED;
+			writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
+				| (val & NOC_QOS_MODEn_MODE_BMSK),
+				NOC_QOS_MODEn_ADDR(base, qos_off, mport,
+								qos_delta));
+		}
+
+		/* Read-modify-write the BW field. */
+		reg_val = readl_relaxed(NOC_QOS_BWn_ADDR(base, qos_off, mport,
+								qos_delta));
+		val = bw_val << NOC_QOS_BWn_BW_SHFT;
+		writel_relaxed(((reg_val & (~(NOC_QOS_BWn_BW_BMSK))) |
+			(val & NOC_QOS_BWn_BW_BMSK)),
+			NOC_QOS_BWn_ADDR(base, qos_off, mport, qos_delta));
+
+		MSM_BUS_DBG("NOC: BW: Wrote value: 0x%x\n", ((reg_val &
+			(~NOC_QOS_BWn_BW_BMSK)) | (val &
+			NOC_QOS_BWn_BW_BMSK)));
+
+		/* Read-modify-write the SAT field. */
+		reg_val = readl_relaxed(NOC_QOS_SATn_ADDR(base, qos_off,
+			mport, qos_delta));
+		val = sat_val << NOC_QOS_SATn_SAT_SHFT;
+		writel_relaxed(((reg_val & (~(NOC_QOS_SATn_SAT_BMSK))) |
+			(val & NOC_QOS_SATn_SAT_BMSK)),
+			NOC_QOS_SATn_ADDR(base, qos_off, mport, qos_delta));
+
+		MSM_BUS_DBG("NOC: SAT: Wrote value: 0x%x\n", ((reg_val &
+			(~NOC_QOS_SATn_SAT_BMSK)) | (val &
+			NOC_QOS_SATn_SAT_BMSK)));
+
+		/* Set mode back to what it was initially */
+		reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta));
+		writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
+			| (mode & NOC_QOS_MODEn_MODE_BMSK),
+			NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta));
+		/* Ensure that all writes for bandwidth registers have
+		 * completed before returning
+		 */
+		wmb();
+	}
+}
+
+/*
+ * msm_bus_noc_get_qos_mode() - Return the QoS mode in effect for @mport.
+ *
+ * If every mode is permitted, the current mode is read back from the
+ * MODEn register; otherwise the index of the highest set bit of @mode
+ * within the permitted-mode mask is returned (31 - leading-zero count).
+ */
+uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode)
+{
+	if (perm_mode == NOC_QOS_MODES_ALL_PERM)
+		return readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK;
+	else
+		/* highest permitted bit index of the requested mode */
+		return 31 - __CLZ(mode &
+			NOC_QOS_MODES_ALL_PERM);
+}
+
+/*
+ * msm_bus_noc_get_qos_priority() - Read back the P1/P0 QoS priority
+ * fields of master port @mport into @priority.
+ */
+void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta,
+	struct msm_bus_noc_qos_priority *priority)
+{
+	priority->p1 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off,
+		mport, qos_delta)) & NOC_QOS_PRIORITYn_P1_BMSK) >>
+		NOC_QOS_PRIORITYn_P1_SHFT;
+
+	priority->p0 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off,
+		mport, qos_delta)) & NOC_QOS_PRIORITYn_P0_BMSK) >>
+		NOC_QOS_PRIORITYn_P0_SHFT;
+}
+
+/*
+ * msm_bus_noc_get_qos_bw() - Convert the BWn/SATn register values of
+ * @mport back into bandwidth (bytes/sec) and window size (ns).
+ *
+ * Bandwidth readback is only meaningful when Limiter or Regulator mode
+ * is permitted; for any other permitted mode both fields report zero.
+ */
+void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off,
+	uint32_t qos_freq,
+	uint32_t mport, uint32_t qos_delta, uint8_t perm_mode,
+	struct msm_bus_noc_qos_bw *qbw)
+{
+	if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
+		NOC_QOS_PERM_MODE_REGULATOR)) {
+		uint32_t bw_val = readl_relaxed(NOC_QOS_BWn_ADDR(
+			base, qos_off, mport, qos_delta)) & NOC_QOS_BWn_BW_BMSK;
+		uint32_t sat = readl_relaxed(NOC_QOS_SATn_ADDR(
+			base, qos_off, mport, qos_delta))
+						& NOC_QOS_SATn_SAT_BMSK;
+
+		qbw->bw = noc_bw(bw_val, qos_freq);
+		qbw->ws = noc_ws(qbw->bw, sat, qos_freq);
+	} else {
+		qbw->bw = 0;
+		qbw->ws = 0;
+	}
+}
+
+/*
+ * msm_bus_noc_update_bw_reg() - Report whether @mode requires the BW
+ * registers to be programmed (only Limiter and Regulator modes do).
+ */
+static bool msm_bus_noc_update_bw_reg(int mode)
+{
+	bool ret = false;
+
+	if ((mode == NOC_QOS_MODE_LIMITER) ||
+			(mode == NOC_QOS_MODE_REGULATOR))
+		ret = true;
+
+	return ret;
+}
+
+/*
+ * msm_bus_noc_qos_init() - Apply initial QoS settings to every QoS port
+ * of @info.
+ *
+ * For non-bypass modes the P1/P0 priorities are programmed first; for
+ * limiter/regulator modes the window size is programmed with a
+ * bandwidth of 0 (real bandwidth arrives later through the set_bw op).
+ * The mode register itself is written last for each port.
+ *
+ * Returns 0; a missing qport array is treated as "nothing to do".
+ */
+static int msm_bus_noc_qos_init(struct msm_bus_node_device_type *info,
+			void __iomem *qos_base,
+			uint32_t qos_off, uint32_t qos_delta,
+			uint32_t qos_freq)
+{
+	struct msm_bus_noc_qos_priority prio;
+	int ret = 0;
+	int i;
+
+	prio.p1 = info->node_info->qos_params.prio1;
+	prio.p0 = info->node_info->qos_params.prio0;
+
+	if (!info->node_info->qport) {
+		MSM_BUS_DBG("No QoS Ports to init\n");
+		ret = 0;
+		goto err_qos_init;
+	}
+
+	for (i = 0; i < info->node_info->num_qports; i++) {
+		if (info->node_info->qos_params.mode != NOC_QOS_MODE_BYPASS) {
+			noc_set_qos_priority(qos_base, qos_off,
+				info->node_info->qport[i], qos_delta,
+				&prio);
+
+			if (info->node_info->qos_params.mode !=
+							NOC_QOS_MODE_FIXED) {
+				struct msm_bus_noc_qos_bw qbw;
+
+				qbw.ws = info->node_info->qos_params.ws;
+				qbw.bw = 0;
+				msm_bus_noc_set_qos_bw(qos_base, qos_off,
+					qos_freq,
+					info->node_info->qport[i],
+					qos_delta,
+					info->node_info->qos_params.mode,
+					&qbw);
+			}
+		}
+
+		noc_set_qos_mode(qos_base, qos_off, info->node_info->qport[i],
+			qos_delta, info->node_info->qos_params.mode,
+			(1 << info->node_info->qos_params.mode));
+	}
+err_qos_init:
+	return ret;
+}
+
+/*
+ * msm_bus_noc_set_bw() - Program the aggregated active-context AB
+ * bandwidth of @dev, split evenly across its QoS ports.
+ *
+ * Only applies when the node is in Regulator or Limiter mode; other
+ * modes have no bandwidth registers to update. Always returns 0.
+ */
+static int msm_bus_noc_set_bw(struct msm_bus_node_device_type *dev,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	int ret = 0;
+	uint64_t bw = 0;
+	int i;
+	struct msm_bus_node_info_type *info = dev->node_info;
+
+	if (info && info->num_qports &&
+		((info->qos_params.mode == NOC_QOS_MODE_REGULATOR) ||
+			(info->qos_params.mode ==
+				NOC_QOS_MODE_LIMITER))) {
+		struct msm_bus_noc_qos_bw qos_bw;
+
+		/* per-port share of the aggregated bandwidth */
+		bw = msm_bus_div64(dev->node_bw[ACTIVE_CTX].sum_ab,
+				info->num_qports);
+
+		for (i = 0; i < info->num_qports; i++) {
+			if (!info->qport) {
+				MSM_BUS_DBG("No qos ports to update!\n");
+				break;
+			}
+
+			qos_bw.bw = bw;
+			qos_bw.ws = info->qos_params.ws;
+			msm_bus_noc_set_qos_bw(qos_base, qos_off, qos_freq,
+				info->qport[i], qos_delta,
+				(1 << info->qos_params.mode), &qos_bw);
+			MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n",
+				qos_bw.ws);
+		}
+	}
+	return ret;
+}
+
+/*
+ * msm_bus_noc_set_lim_mode() - Throttle @info by putting its QoS ports
+ * into Limiter mode at @lim_bw.
+ *
+ * The last programmed limit is cached in node_info->lim_bw so the BW
+ * registers are only rewritten when the limit actually changes; the
+ * mode registers are (re)written unconditionally. Always returns 0.
+ */
+static int msm_bus_noc_set_lim_mode(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				u64 lim_bw)
+{
+	int i;
+
+	if (info && info->node_info->num_qports) {
+		struct msm_bus_noc_qos_bw qos_bw;
+
+		if (lim_bw != info->node_info->lim_bw) {
+			for (i = 0; i < info->node_info->num_qports; i++) {
+				qos_bw.bw = lim_bw;
+				qos_bw.ws = info->node_info->qos_params.ws;
+					msm_bus_noc_set_qos_bw(qos_base,
+					qos_off, qos_freq,
+					info->node_info->qport[i], qos_delta,
+					(1 << NOC_QOS_MODE_LIMITER), &qos_bw);
+			}
+			info->node_info->lim_bw = lim_bw;
+		}
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			noc_set_qos_mode(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					NOC_QOS_MODE_LIMITER,
+					(1 << NOC_QOS_MODE_LIMITER));
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * msm_bus_noc_set_reg_mode() - Throttle @info by putting its QoS ports
+ * into Regulator mode at @lim_bw.
+ *
+ * Programs the regulator priorities first, then the BW registers (only
+ * when the cached lim_bw changes), then the mode. Always returns 0.
+ *
+ * NOTE(review): node_info->qport is dereferenced without a NULL check,
+ * unlike msm_bus_noc_set_bw() -- presumably callers guarantee qport is
+ * populated whenever num_qports is non-zero; confirm.
+ */
+static int msm_bus_noc_set_reg_mode(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				u64 lim_bw)
+{
+	int i;
+
+	if (info && info->node_info->num_qports) {
+		struct msm_bus_noc_qos_priority prio;
+		struct msm_bus_noc_qos_bw qos_bw;
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			prio.p1 =
+				info->node_info->qos_params.reg_prio1;
+			prio.p0 =
+				info->node_info->qos_params.reg_prio0;
+			noc_set_qos_priority(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					&prio);
+		}
+
+		if (lim_bw != info->node_info->lim_bw) {
+			for (i = 0; i < info->node_info->num_qports; i++) {
+				qos_bw.bw = lim_bw;
+				qos_bw.ws = info->node_info->qos_params.ws;
+				msm_bus_noc_set_qos_bw(qos_base, qos_off,
+					qos_freq,
+					info->node_info->qport[i], qos_delta,
+					(1 << NOC_QOS_MODE_REGULATOR), &qos_bw);
+			}
+			info->node_info->lim_bw = lim_bw;
+		}
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			noc_set_qos_mode(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					NOC_QOS_MODE_REGULATOR,
+					(1 << NOC_QOS_MODE_REGULATOR));
+		}
+	}
+	return 0;
+}
+
+/*
+ * msm_bus_noc_set_def_mode() - Restore the node's default (DT-provided)
+ * QoS mode on every QoS port, re-applying the default priorities when
+ * that mode is Fixed. @lim_bw is unused here. Always returns 0.
+ */
+static int msm_bus_noc_set_def_mode(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				u64 lim_bw)
+{
+	int i;
+
+	for (i = 0; i < info->node_info->num_qports; i++) {
+		if (info->node_info->qos_params.mode ==
+						NOC_QOS_MODE_FIXED) {
+			struct msm_bus_noc_qos_priority prio;
+
+			prio.p1 =
+				info->node_info->qos_params.prio1;
+			prio.p0 =
+				info->node_info->qos_params.prio0;
+			noc_set_qos_priority(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta, &prio);
+		}
+		noc_set_qos_mode(qos_base, qos_off,
+				info->node_info->qport[i],
+				qos_delta,
+				info->node_info->qos_params.mode,
+				(1 << info->node_info->qos_params.mode));
+	}
+	return 0;
+}
+
+/*
+ * msm_bus_noc_limit_mport() - Apply or remove throttling on a node.
+ *
+ * With a non-zero @lim_bw, @enable_lim selects Regulator (THROTTLE_REG)
+ * or Limiter (THROTTLE_ON) throttling; any other value, or lim_bw == 0,
+ * restores the node's default QoS mode.
+ *
+ * Returns 0 on success, -ENXIO when the node has no QoS ports.
+ */
+static int msm_bus_noc_limit_mport(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				int enable_lim, u64 lim_bw)
+{
+	int ret = 0;
+
+	if (!(info && info->node_info->num_qports)) {
+		MSM_BUS_ERR("Invalid Node info or no Qports to program");
+		ret = -ENXIO;
+		goto exit_limit_mport;
+	}
+
+	if (lim_bw) {
+		switch (enable_lim) {
+		case THROTTLE_REG:
+			msm_bus_noc_set_reg_mode(info, qos_base, qos_off,
+						qos_delta, qos_freq, lim_bw);
+			break;
+		case THROTTLE_ON:
+			msm_bus_noc_set_lim_mode(info, qos_base, qos_off,
+						qos_delta, qos_freq, lim_bw);
+			break;
+		default:
+			msm_bus_noc_set_def_mode(info, qos_base, qos_off,
+						qos_delta, qos_freq, lim_bw);
+			break;
+		}
+	} else
+		msm_bus_noc_set_def_mode(info, qos_base, qos_off,
+					qos_delta, qos_freq, lim_bw);
+
+exit_limit_mport:
+	return ret;
+}
+
+/*
+ * msm_bus_noc_set_ops() - Install the NOC QoS operations on @bus_dev's
+ * fabric device. Returns 0, or -ENODEV for a NULL device.
+ */
+int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	if (!bus_dev)
+		return -ENODEV;
+
+	bus_dev->fabdev->noc_ops.qos_init = msm_bus_noc_qos_init;
+	bus_dev->fabdev->noc_ops.set_bw = msm_bus_noc_set_bw;
+	bus_dev->fabdev->noc_ops.limit_mport = msm_bus_noc_limit_mport;
+	bus_dev->fabdev->noc_ops.update_bw_reg = msm_bus_noc_update_bw_reg;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_bus_noc_set_ops);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c
new file mode 100644
index 0000000..d6a0ad2
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: NOC: %s(): " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_rpmh.h"
+
+/* NOC_QOS generic */
+#define __CLZ(x) ((8 * sizeof(uint32_t)) - 1 - __fls(x))
+#define SAT_SCALE 16 /* 16 bytes minimum for saturation */
+#define BW_SCALE 256 /* 1/256 byte per cycle unit */
+#define QOS_DEFAULT_BASEOFFSET 0x00003000
+#define QOS_DEFAULT_DELTA 0x80
+#define MAX_BW_FIELD (NOC_QOS_BWn_BW_BMSK >> NOC_QOS_BWn_BW_SHFT)
+#define MAX_SAT_FIELD (NOC_QOS_SATn_SAT_BMSK >> NOC_QOS_SATn_SAT_SHFT)
+#define MIN_SAT_FIELD 1
+#define MIN_BW_FIELD 1
+#define READ_TIMEOUT_MS msecs_to_jiffies(1)
+#define READ_DELAY_US 10
+
+#define NOC_QOS_REG_BASE(b, o) ((b) + (o))
+
+/*Sideband Manager Disable Macros*/
+#define DISABLE_SBM_FLAGOUTCLR0_LOW_OFF 0x80
+#define DISABLE_SBM_FLAGOUTCLR0_HIGH_OFF 0x84
+#define DISABLE_SBM_FLAGOUTSET0_LOW_OFF 0x88
+#define DISABLE_SBM_FLAGOUTSET0_HIGH_OFF 0x8C
+#define DISABLE_SBM_FLAGOUTSTATUS0_LOW_OFF 0x90
+#define DISABLE_SBM_FLAGOUTSTATUS0_HIGH_OFF 0x94
+#define DISABLE_SBM_SENSEIN0_LOW_OFF 0x100
+#define DISABLE_SBM_SENSEIN0_HIGH_OFF 0x104
+
+#define DISABLE_SBM_REG_BASE(b, o, d) ((b) + (o) + (d))
+
+#define NOC_QOS_MAINCTL_LOWn_ADDR(b, o, n, d) \
+ (NOC_QOS_REG_BASE(b, o) + 0x8 + (d) * (n))
+enum noc_qos_id_mainctl_lown {
+ NOC_QOS_MCTL_DFLT_PRIOn_BMSK = 0x00000070,
+ NOC_QOS_MCTL_DFLT_PRIOn_SHFT = 0x4,
+ NOC_QOS_MCTL_URGFWD_ENn_BMSK = 0x00000008,
+ NOC_QOS_MCTL_URGFWD_ENn_SHFT = 0x3,
+ NOC_QOS_MCTL_LIMIT_ENn_BMSK = 0x00000001,
+ NOC_QOS_MCTL_LIMIT_ENn_SHFT = 0x0,
+};
+
+#define NOC_QOS_LIMITBWn_ADDR(b, o, n, d) \
+ (NOC_QOS_REG_BASE(b, o) + 0x18 + (d) * (n))
+enum noc_qos_id_limitbwn {
+ NOC_QOS_LIMITBW_BWn_BMSK = 0x000007FF,
+ NOC_QOS_LIMITBW_BWn_SHFT = 0x0,
+ NOC_QOS_LIMITBW_SATn_BMSK = 0x03FF0000,
+ NOC_QOS_LIMITBW_SATn_SHFT = 0x11,
+};
+
+#define NOC_QOS_REGUL0CTLn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0x40 + (d) * (n))
+/*
+ * NOTE(review): the HI/LW priority shifts look inconsistent with their
+ * masks: 0x00007000 covers bits [14:12] (shift 0xC expected) and
+ * 0x00000700 covers bits [10:8] (shift 0x8 expected). As written,
+ * noc_set_qos_regulator() shifts hi_prio by 0x8 and masks with 0x7000,
+ * which yields 0 for any priority 0-7 -- verify the mask/shift pairing
+ * against the QNoC REGUL0CTL register specification.
+ */
+enum noc_qos_id_regul0ctln {
+	NOC_QOS_REGUL0CTL_HI_PRIOn_BMSK = 0x00007000,
+	NOC_QOS_REGUL0CTL_HI_PRIOn_SHFT = 0x8,
+	NOC_QOS_REGUL0CTL_LW_PRIOn_BMSK = 0x00000700,
+	NOC_QOS_REGUL0CTL_LW_PRIOn_SHFT = 0xC,
+	NOC_QOS_REGUL0CTL_WRENn_BMSK = 0x00000002,
+	NOC_QOS_REGUL0CTL_WRENn_SHFT = 0x1,
+	NOC_QOS_REGUL0CTL_RDENn_BMSK = 0x00000001,
+	NOC_QOS_REGUL0CTL_RDENn_SHFT = 0x0,
+};
+
+#define NOC_QOS_REGUL0BWn_ADDR(b, o, n, d) \
+ (NOC_QOS_REG_BASE(b, o) + 0x48 + (d) * (n))
+enum noc_qos_id_regul0bwbwn {
+ NOC_QOS_REGUL0BW_BWn_BMSK = 0x000007FF,
+ NOC_QOS_REGUL0BW_BWn_SHFT = 0x0,
+ NOC_QOS_REGUL0BW_SATn_BMSK = 0x03FF0000,
+ NOC_QOS_REGUL0BW_SATn_SHFT = 0x11,
+};
+
+#define NOC_QOS_MODEn_ADDR(b, o, n, d) \
+ (NOC_QOS_REG_BASE(b, o) + 0xC + (d) * (n))
+enum noc_qos_id_moden_rmsk {
+ NOC_QOS_MODEn_RMSK = 0x00000003,
+ NOC_QOS_MODEn_MAXn = 32,
+ NOC_QOS_MODEn_MODE_BMSK = 0x3,
+ NOC_QOS_MODEn_MODE_SHFT = 0x0,
+};
+
+#define NOC_QOS_BWn_ADDR(b, o, n, d) \
+ (NOC_QOS_REG_BASE(b, o) + 0x10 + (d) * (n))
+enum noc_qos_id_bwn {
+ NOC_QOS_BWn_RMSK = 0x0000ffff,
+ NOC_QOS_BWn_MAXn = 32,
+ NOC_QOS_BWn_BW_BMSK = 0xffff,
+ NOC_QOS_BWn_BW_SHFT = 0x0,
+};
+
+/* QOS Saturation registers */
+#define NOC_QOS_SATn_ADDR(b, o, n, d) \
+ (NOC_QOS_REG_BASE(b, o) + 0x14 + (d) * (n))
+enum noc_qos_id_saturationn {
+ NOC_QOS_SATn_RMSK = 0x000003ff,
+ NOC_QOS_SATn_MAXn = 32,
+ NOC_QOS_SATn_SAT_BMSK = 0x3ff,
+ NOC_QOS_SATn_SAT_SHFT = 0x0,
+};
+
+/*
+ * noc_div() - Divide *a by b in place.
+ *
+ * Quotients between 0 and 1 are truncated to 0, with 1 returned to
+ * flag a non-zero remainder; otherwise the remainder from do_div() is
+ * returned. Callers are expected to pass a non-zero divisor.
+ */
+static int noc_div(uint64_t *a, uint32_t b)
+{
+	if ((*a > 0) && (*a < b)) {
+		*a = 0;
+		return 1;
+	} else {
+		return do_div(*a, b);
+	}
+}
+
+/**
+ * noc_bw() - Convert a raw BWn register field back into bandwidth.
+ * @bw_field: value read from the BWn register
+ * @qos_freq: QoS clock frequency (appears to be in KHz -- TODO confirm
+ *            units against the callers)
+ *
+ * bw returned is in bytes/sec; the division remainder is discarded.
+ */
+static uint64_t noc_bw(uint32_t bw_field, uint32_t qos_freq)
+{
+	uint64_t res;
+	uint32_t rem, scale;
+
+	res = 2 * qos_freq * bw_field;
+	scale = BW_SCALE * 1000;
+	rem = noc_div(&res, scale);
+	MSM_BUS_DBG("NOC: Calculated bw: %llu\n", res * 1000000ULL);
+	return res * 1000000ULL;
+}
+
+/**
+ * Calculate the max BW in Bytes/s for a given time-base.
+ */
+#define MAX_BW(timebase) noc_bw_ceil(MAX_BW_FIELD, (timebase))
+
+/**
+ * noc_ws() - Convert a saturation register value back to a window size.
+ * @bw: bandwidth in bytes/sec
+ * @sat: raw SATn register field
+ * @qos_freq: QoS clock frequency
+ *
+ * ws returned is in nanoseconds; 0 when @bw or @qos_freq is 0.
+ *
+ * NOTE(review): "bw * qos_freq" is evaluated in 32 bits even though bw
+ * is 64-bit, so large bandwidths may truncate -- confirm the intended
+ * operand width.
+ */
+static uint32_t noc_ws(uint64_t bw, uint32_t sat, uint32_t qos_freq)
+{
+	if (bw && qos_freq) {
+		uint32_t bwf = bw * qos_freq;
+		uint64_t scale = 1000000000000LL * BW_SCALE *
+			SAT_SCALE * sat;
+		noc_div(&scale, bwf);
+		MSM_BUS_DBG("NOC: Calculated ws: %llu\n", scale);
+		return scale;
+	}
+
+	return 0;
+}
+#define MAX_WS(bw, timebase) noc_ws((bw), MAX_SAT_FIELD, (timebase))
+
+/*
+ * noc_set_qos_dflt_prio() - Program the default-priority field of the
+ * MAINCTL_LOW register for @mport, leaving all other bits untouched
+ * (read-modify-write).
+ */
+static void noc_set_qos_dflt_prio(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		uint32_t prio)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = prio << NOC_QOS_MCTL_DFLT_PRIOn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_MCTL_DFLT_PRIOn_BMSK))) |
+		(val & NOC_QOS_MCTL_DFLT_PRIOn_BMSK)),
+		NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure qos priority is set before exiting */
+	wmb();
+}
+
+/*
+ * noc_set_qos_limiter() - Configure the bandwidth limiter for @mport.
+ *
+ * Sequence (each stage fenced with wmb() so the device observes it in
+ * order): disable the limiter, program the BW and SAT fields of the
+ * LIMITBW register, then write the enable bit from @lim_en.
+ */
+static void noc_set_qos_limiter(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		struct msm_bus_noc_limiter *lim, uint32_t lim_en)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport,
+		qos_delta));
+
+	writel_relaxed((reg_val & (~(NOC_QOS_MCTL_LIMIT_ENn_BMSK))),
+		NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure we disable limiter before config*/
+	wmb();
+
+	reg_val = readl_relaxed(NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = lim->bw << NOC_QOS_LIMITBW_BWn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_LIMITBW_BWn_BMSK))) |
+		(val & NOC_QOS_LIMITBW_BWn_BMSK)),
+		NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = lim->sat << NOC_QOS_LIMITBW_SATn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_LIMITBW_SATn_BMSK))) |
+		(val & NOC_QOS_LIMITBW_SATn_BMSK)),
+		NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure qos limiter settings in place before possibly enabling */
+	wmb();
+
+	reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = lim_en << NOC_QOS_MCTL_LIMIT_ENn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_MCTL_LIMIT_ENn_BMSK))) |
+		(val & NOC_QOS_MCTL_LIMIT_ENn_BMSK)),
+		NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure qos limiter writes take place before exiting*/
+	wmb();
+}
+
+/*
+ * noc_set_qos_regulator() - Configure the QoS regulator for @mport.
+ *
+ * Sequence (fenced with wmb() between stages): clear the read/write
+ * enable bits, program high/low priorities and the BW/SAT fields, then
+ * re-apply the read/write enables from @reg_mode.
+ */
+static void noc_set_qos_regulator(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		struct msm_bus_noc_regulator *reg,
+		struct msm_bus_noc_regulator_mode *reg_mode)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport,
+		qos_delta)) & (NOC_QOS_REGUL0CTL_WRENn_BMSK |
+						NOC_QOS_REGUL0CTL_RDENn_BMSK);
+
+	writel_relaxed((reg_val & (~(NOC_QOS_REGUL0CTL_WRENn_BMSK |
+						NOC_QOS_REGUL0CTL_RDENn_BMSK))),
+		NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure qos regulator is disabled before configuring */
+	wmb();
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_REGUL0CTL_HI_PRIOn_BMSK;
+	val = reg->hi_prio << NOC_QOS_REGUL0CTL_HI_PRIOn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0CTL_HI_PRIOn_BMSK))) |
+		(val & NOC_QOS_REGUL0CTL_HI_PRIOn_BMSK)),
+		NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_REGUL0CTL_LW_PRIOn_BMSK;
+	val = reg->low_prio << NOC_QOS_REGUL0CTL_LW_PRIOn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0CTL_LW_PRIOn_BMSK))) |
+		(val & NOC_QOS_REGUL0CTL_LW_PRIOn_BMSK)),
+		NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0BWn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_REGUL0BW_BWn_BMSK;
+	val = reg->bw << NOC_QOS_REGUL0BW_BWn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0BW_BWn_BMSK))) |
+		(val & NOC_QOS_REGUL0BW_BWn_BMSK)),
+		NOC_QOS_REGUL0BWn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0BWn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_REGUL0BW_SATn_BMSK;
+	val = reg->sat << NOC_QOS_REGUL0BW_SATn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0BW_SATn_BMSK))) |
+		(val & NOC_QOS_REGUL0BW_SATn_BMSK)),
+		NOC_QOS_REGUL0BWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure regulator is configured before possibly enabling */
+	wmb();
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = reg_mode->write << NOC_QOS_REGUL0CTL_WRENn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0CTL_WRENn_BMSK))) |
+		(val & NOC_QOS_REGUL0CTL_WRENn_BMSK)),
+		NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = reg_mode->read << NOC_QOS_REGUL0CTL_RDENn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0CTL_RDENn_BMSK))) |
+		(val & NOC_QOS_REGUL0CTL_RDENn_BMSK)),
+		NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure regulator is ready before exiting */
+	wmb();
+}
+
+/*
+ * noc_set_qos_forwarding() - Set or clear the urgency-forwarding enable
+ * bit in MAINCTL_LOW for @mport (read-modify-write, fenced with wmb()).
+ */
+static void noc_set_qos_forwarding(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		bool urg_fwd_en)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = (urg_fwd_en ? 1:0) << NOC_QOS_MCTL_URGFWD_ENn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_MCTL_URGFWD_ENn_BMSK))) |
+		(val & NOC_QOS_MCTL_URGFWD_ENn_BMSK)),
+		NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure qos priority is set before exiting */
+	wmb();
+}
+
+/*
+ * msm_bus_noc_get_qos_bw() - Convert the legacy BWn/SATn register values
+ * of @mport back into bandwidth (bytes/sec) and window size (ns).
+ *
+ * Only meaningful when Limiter or Regulator mode is permitted; any
+ * other permitted mode reports zero for both fields.
+ */
+void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off,
+	uint32_t qos_freq,
+	uint32_t mport, uint32_t qos_delta, uint8_t perm_mode,
+	struct msm_bus_noc_qos_bw *qbw)
+{
+	if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
+		NOC_QOS_PERM_MODE_REGULATOR)) {
+		uint32_t bw_val = readl_relaxed(NOC_QOS_BWn_ADDR(
+			base, qos_off, mport, qos_delta)) & NOC_QOS_BWn_BW_BMSK;
+		uint32_t sat = readl_relaxed(NOC_QOS_SATn_ADDR(
+			base, qos_off, mport, qos_delta))
+						& NOC_QOS_SATn_SAT_BMSK;
+
+		qbw->bw = noc_bw(bw_val, qos_freq);
+		qbw->ws = noc_ws(qbw->bw, sat, qos_freq);
+	} else {
+		qbw->bw = 0;
+		qbw->ws = 0;
+	}
+}
+
+/*
+ * msm_bus_noc_qos_init() - Apply the DT-supplied QoS parameters to every
+ * QoS port of @info: default priority, limiter, regulator and urgency
+ * forwarding, in that order.
+ *
+ * Returns 0; a missing qport array is treated as "nothing to do".
+ * Note: @qos_freq is accepted for interface compatibility but unused
+ * in this implementation.
+ */
+static int msm_bus_noc_qos_init(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	struct msm_bus_noc_qos_params *qos_params;
+	int ret = 0;
+	int i;
+
+	qos_params = &info->node_info->qos_params;
+
+	if (!info->node_info->qport) {
+		MSM_BUS_DBG("No QoS Ports to init\n");
+		ret = 0;
+		goto err_qos_init;
+	}
+
+	for (i = 0; i < info->node_info->num_qports; i++) {
+		noc_set_qos_dflt_prio(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					qos_params->prio_dflt);
+
+		noc_set_qos_limiter(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					&qos_params->limiter,
+					qos_params->limiter_en);
+
+		noc_set_qos_regulator(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					&qos_params->reg,
+					&qos_params->reg_mode);
+
+		noc_set_qos_forwarding(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					qos_params->urg_fwd_en);
+	}
+err_qos_init:
+	return ret;
+}
+
+/*
+ * msm_bus_noc_sbm_config() - Connect or disconnect the node's ports in
+ * the NoC Sideband Manager.
+ *
+ * For each port in disable_ports: ports 0-31 use the LOW flag/sense
+ * registers, ports 32-63 the HIGH ones. @enable writes the FLAGOUTCLR
+ * bit (reconnect) and polls SENSEIN until the bit clears; disable
+ * writes FLAGOUTSET and polls until the bit is observed set. Each poll
+ * waits READ_DELAY_US between reads, for at most READ_TIMEOUT_MS.
+ *
+ * Returns 0 on success, -EINVAL for a port index >= 64, -ETIME when a
+ * sense poll times out.
+ */
+static int msm_bus_noc_sbm_config(struct msm_bus_node_device_type *node_dev,
+				void __iomem *noc_base, uint32_t sbm_offset,
+				bool enable)
+{
+	int ret = 0, idx;
+	unsigned long j, j_timeout;
+	uint32_t flagset_offset, flagclr_offset, sense_offset;
+
+	for (idx = 0; idx < node_dev->node_info->num_disable_ports; idx++) {
+		uint32_t disable_port = node_dev->node_info->disable_ports[idx];
+		uint32_t reg_val = 0;
+
+		if (disable_port >= 64) {
+			return -EINVAL;
+		} else if (disable_port < 32) {
+			flagset_offset = DISABLE_SBM_FLAGOUTSET0_LOW_OFF;
+			flagclr_offset = DISABLE_SBM_FLAGOUTCLR0_LOW_OFF;
+			sense_offset = DISABLE_SBM_SENSEIN0_LOW_OFF;
+		} else {
+			flagset_offset = DISABLE_SBM_FLAGOUTSET0_HIGH_OFF;
+			flagclr_offset = DISABLE_SBM_FLAGOUTCLR0_HIGH_OFF;
+			sense_offset = DISABLE_SBM_SENSEIN0_HIGH_OFF;
+			/* bit position within the HIGH register */
+			disable_port = disable_port - 32;
+		}
+
+		if (enable) {
+			reg_val |= 0x1 << disable_port;
+			writel_relaxed(reg_val, DISABLE_SBM_REG_BASE(noc_base,
+					sbm_offset, flagclr_offset));
+			/* Ensure SBM reconnect took place */
+			wmb();
+
+			j = jiffies;
+			j_timeout = j + READ_TIMEOUT_MS;
+			while (((0x1 << disable_port) &
+				readl_relaxed(DISABLE_SBM_REG_BASE(noc_base,
+				sbm_offset, sense_offset)))) {
+				udelay(READ_DELAY_US);
+				j = jiffies;
+				if (time_after(j, j_timeout)) {
+					MSM_BUS_ERR("%s: SBM enable timeout.\n",
+						__func__);
+					goto sbm_timeout;
+				}
+			}
+		} else {
+			reg_val |= 0x1 << disable_port;
+			writel_relaxed(reg_val, DISABLE_SBM_REG_BASE(noc_base,
+					sbm_offset, flagset_offset));
+			/* Ensure SBM disconnect took place */
+			wmb();
+
+			j = jiffies;
+			j_timeout = j + READ_TIMEOUT_MS;
+			while (!((0x1 << disable_port) &
+				readl_relaxed(DISABLE_SBM_REG_BASE(noc_base,
+				sbm_offset, sense_offset)))) {
+				udelay(READ_DELAY_US);
+				j = jiffies;
+				if (time_after(j, j_timeout)) {
+					MSM_BUS_ERR("%s: SBM disable timeout.\n"
+						, __func__);
+					goto sbm_timeout;
+				}
+			}
+		}
+	}
+	return ret;
+
+sbm_timeout:
+	return -ETIME;
+
+}
+
+/*
+ * msm_bus_noc_set_ops() - Install the rpmh NOC QoS operations (qos_init
+ * and sbm_config) on @bus_dev's fabric device.
+ * Returns 0, or -ENODEV for a NULL device.
+ */
+int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	if (!bus_dev)
+		return -ENODEV;
+
+	bus_dev->fabdev->noc_ops.qos_init = msm_bus_noc_qos_init;
+	bus_dev->fabdev->noc_ops.sbm_config = msm_bus_noc_sbm_config;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_bus_noc_set_ops);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of.c b/drivers/soc/qcom/msm_bus/msm_bus_of.c
new file mode 100644
index 0000000..3d025c4
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of.c
@@ -0,0 +1,742 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+
+static const char * const hw_sel_name[] = {"RPM", "NoC", "BIMC", NULL};
+static const char * const mode_sel_name[] = {"Fixed", "Limiter", "Bypass",
+ "Regulator", NULL};
+
+/*
+ * get_num() - Return the index of @name within the NULL-terminated
+ * string table @str, or -EINVAL when it is not present.
+ */
+static int get_num(const char *const str[], const char *name)
+{
+	int i = 0;
+
+	do {
+		if (!strcmp(name, str[i]))
+			return i;
+
+		i++;
+	} while (str[i] != NULL);
+
+	pr_err("Error: string %s not found\n", name);
+	return -EINVAL;
+}
+
+/**
+ * get_pdata() - Build a msm_bus_scale_pdata from a client device node.
+ * @dev: device whose devres group owns all allocations
+ * @of_node: DT node carrying the "qcom,msm-bus,*" properties
+ *
+ * For ALC voters only the latency vectors ("vectors-alc") are parsed;
+ * otherwise the KBps vectors are converted to bytes/sec. Every
+ * allocation is device-managed, so error paths simply return NULL and
+ * let devres reclaim the memory on device detach. (The previous error
+ * path called kfree() on devm_kzalloc()'d vectors, which would have
+ * caused a double free at detach time.)
+ *
+ * Returns the populated pdata on success, NULL on any parse failure.
+ */
+static struct msm_bus_scale_pdata *get_pdata(struct device *dev,
+	struct device_node *of_node)
+{
+	struct msm_bus_scale_pdata *pdata = NULL;
+	struct msm_bus_paths *usecase = NULL;
+	struct msm_bus_lat_vectors *usecase_lat = NULL;
+	int i = 0, j, ret, num_usecases = 0, num_paths, len;
+	const uint32_t *vec_arr = NULL;
+
+	if (!dev) {
+		pr_err("Error: Null device\n");
+		return NULL;
+	}
+
+	pdata = devm_kzalloc(dev, sizeof(struct msm_bus_scale_pdata),
+		GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	ret = of_property_read_string(of_node, "qcom,msm-bus,name",
+		&pdata->name);
+	if (ret) {
+		pr_err("Error: Client name not found\n");
+		return NULL;
+	}
+
+	ret = of_property_read_u32(of_node, "qcom,msm-bus,num-cases",
+		&num_usecases);
+	if (ret) {
+		pr_err("Error: num-usecases not found\n");
+		return NULL;
+	}
+
+	pdata->num_usecases = num_usecases;
+
+	if (of_property_read_bool(of_node, "qcom,msm-bus,active-only"))
+		pdata->active_only = 1;
+	else {
+		pr_debug("active_only flag absent.\n");
+		pr_debug("Using dual context by default\n");
+	}
+
+	pdata->alc = of_property_read_bool(of_node, "qcom,msm-bus,alc-voter");
+
+	if (pdata->alc) {
+		/* ALC voters carry latency vectors instead of BW vectors */
+		usecase_lat = devm_kzalloc(dev,
+				(sizeof(struct msm_bus_lat_vectors) *
+				pdata->num_usecases), GFP_KERNEL);
+		if (!usecase_lat)
+			return NULL;
+
+		vec_arr = of_get_property(of_node,
+				"qcom,msm-bus,vectors-alc", &len);
+		if (vec_arr == NULL) {
+			pr_err("Error: Lat vector array not found\n");
+			return NULL;
+		}
+
+		if (len != num_usecases * sizeof(uint32_t) * 2) {
+			pr_err("Error: Length-error on getting vectors\n");
+			return NULL;
+		}
+
+		for (i = 0; i < num_usecases; i++) {
+			int index = i * 2;
+
+			usecase_lat[i].fal_ns = (uint64_t)
+				be32_to_cpu(vec_arr[index]);
+			usecase_lat[i].idle_t_ns = (uint64_t)
+				be32_to_cpu(vec_arr[index + 1]);
+		}
+
+		pdata->usecase_lat = usecase_lat;
+		return pdata;
+	}
+
+	usecase = devm_kzalloc(dev, (sizeof(struct msm_bus_paths) *
+		pdata->num_usecases), GFP_KERNEL);
+	if (!usecase)
+		return NULL;
+
+	ret = of_property_read_u32(of_node, "qcom,msm-bus,num-paths",
+		&num_paths);
+	if (ret) {
+		pr_err("Error: num_paths not found\n");
+		return NULL;
+	}
+
+	vec_arr = of_get_property(of_node, "qcom,msm-bus,vectors-KBps", &len);
+	if (vec_arr == NULL) {
+		pr_err("Error: Vector array not found\n");
+		return NULL;
+	}
+
+	/* four cells (src, dst, ab, ib) per path per usecase */
+	if (len != num_usecases * num_paths * sizeof(uint32_t) * 4) {
+		pr_err("Error: Length-error on getting vectors\n");
+		return NULL;
+	}
+
+	for (i = 0; i < num_usecases; i++) {
+		usecase[i].num_paths = num_paths;
+		usecase[i].vectors = devm_kzalloc(dev, num_paths *
+			sizeof(struct msm_bus_vectors), GFP_KERNEL);
+		if (!usecase[i].vectors) {
+			pr_err("Error: Mem alloc failure in vectors\n");
+			return NULL;
+		}
+
+		for (j = 0; j < num_paths; j++) {
+			int index = ((i * num_paths) + j) * 4;
+
+			usecase[i].vectors[j].src = be32_to_cpu(vec_arr[index]);
+			usecase[i].vectors[j].dst =
+				be32_to_cpu(vec_arr[index + 1]);
+			usecase[i].vectors[j].ab = (uint64_t)
+				KBTOB(be32_to_cpu(vec_arr[index + 2]));
+			usecase[i].vectors[j].ib = (uint64_t)
+				KBTOB(be32_to_cpu(vec_arr[index + 3]));
+		}
+	}
+
+	pdata->usecase = usecase;
+	return pdata;
+}
+
+/**
+ * msm_bus_cl_get_pdata() - Generate bus client data from device tree
+ * provided by clients.
+ *
+ * pdev: Platform device whose of_node carries the bus-scaling
+ * properties to extract
+ *
+ * The function returns a valid pointer to the allocated bus-scale-pdata
+ * if the vectors were correctly read from the client's device node.
+ * Any error in reading or parsing the device node will return NULL
+ * to the caller.
+ */
+struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev)
+{
+	struct device_node *of_node;
+	struct msm_bus_scale_pdata *pdata = NULL;
+
+	if (!pdev) {
+		pr_err("Error: Null Platform device\n");
+		return NULL;
+	}
+
+	of_node = pdev->dev.of_node;
+	pdata = get_pdata(&pdev->dev, of_node);
+	if (!pdata) {
+		pr_err("client has to provide missing entry for successful registration\n");
+		return NULL;
+	}
+
+	return pdata;
+}
+EXPORT_SYMBOL(msm_bus_cl_get_pdata);
+
+/**
+ * msm_bus_cl_get_pdata_from_dev() - Generate bus client data from device
+ * tree provided by clients.
+ *
+ * dev: Device whose of_node carries the bus-scaling properties to
+ * extract
+ *
+ * The function returns a valid pointer to the allocated bus-scale-pdata
+ * if the vectors were correctly read from the client's device node.
+ * Any error in reading or parsing the device node will return NULL
+ * to the caller.
+ */
+struct msm_bus_scale_pdata *msm_bus_cl_get_pdata_from_dev(struct device *dev)
+{
+	struct device_node *of_node;
+	struct msm_bus_scale_pdata *pdata = NULL;
+
+	of_node = dev->of_node;
+
+	if (!of_node)
+		return NULL;
+
+	pdata = get_pdata(dev, of_node);
+	if (!pdata) {
+		pr_err("client has to provide missing entry for successful registration\n");
+		return NULL;
+	}
+
+	return pdata;
+}
+EXPORT_SYMBOL(msm_bus_cl_get_pdata_from_dev);
+
+/**
+ * msm_bus_cl_pdata_from_node() - Generate bus client data from device tree
+ * node provided by clients. This function should be used when a client
+ * driver needs to register multiple bus-clients from a single device-tree
+ * node associated with the platform-device.
+ *
+ * of_node: The subnode containing information about the bus scaling
+ * data
+ *
+ * pdev: Platform device associated with the device-tree node
+ *
+ * The function returns a valid pointer to the allocated bus-scale-pdata
+ * if the vectors were correctly read from the client's device node.
+ * Any error in reading or parsing the device node will return NULL
+ * to the caller.
+ */
+struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+		struct platform_device *pdev, struct device_node *of_node)
+{
+	struct msm_bus_scale_pdata *pdata = NULL;
+
+	if (!pdev) {
+		pr_err("Error: Null Platform device\n");
+		return NULL;
+	}
+
+	if (!of_node) {
+		pr_err("Error: Null of_node passed to bus driver\n");
+		return NULL;
+	}
+
+	/* allocations are owned by pdev's device, not the subnode */
+	pdata = get_pdata(&pdev->dev, of_node);
+	if (!pdata) {
+		pr_err("client has to provide missing entry for successful registration\n");
+		return NULL;
+	}
+
+	return pdata;
+}
+EXPORT_SYMBOL(msm_bus_pdata_from_node);
+
+static int *get_arr(struct platform_device *pdev,
+ const struct device_node *node, const char *prop,
+ int *nports)
+{
+ int size = 0, ret;
+ int *arr = NULL;
+
+ if (of_get_property(node, prop, &size)) {
+ *nports = size / sizeof(int);
+ } else {
+ pr_debug("Property %s not available\n", prop);
+ *nports = 0;
+ return NULL;
+ }
+
+ if (!size) {
+ *nports = 0;
+ return NULL;
+ }
+
+ arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(arr)) {
+ pr_err("Error: Failed to alloc mem for %s\n", prop);
+ return NULL;
+ }
+
+ ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+ if (ret) {
+ pr_err("Error in reading property: %s\n", prop);
+ goto err;
+ }
+
+ return arr;
+err:
+ devm_kfree(&pdev->dev, arr);
+ return NULL;
+}
+
+/*
+ * get_th_params() - Read the u32 array property @prop and return a
+ * devm-allocated u64 array with each entry converted from KB to bytes
+ * via KBTOB(); *nports receives the element count.
+ *
+ * Returns NULL on any failure (with *nports = 0 when the property is
+ * missing or empty). The temporary int buffer is always kfree()d; the
+ * devm ret_arr is devm_kfree()d on the read-failure path only -- on the
+ * temp-alloc failure path it is left to devres to reclaim.
+ */
+static u64 *get_th_params(struct platform_device *pdev,
+		const struct device_node *node, const char *prop,
+		int *nports)
+{
+	int size = 0, ret;
+	u64 *ret_arr = NULL;
+	int *arr = NULL;
+	int i;
+
+	if (of_get_property(node, prop, &size)) {
+		*nports = size / sizeof(int);
+	} else {
+		pr_debug("Property %s not available\n", prop);
+		*nports = 0;
+		return NULL;
+	}
+
+	if (!size) {
+		*nports = 0;
+		return NULL;
+	}
+
+	ret_arr = devm_kzalloc(&pdev->dev, (*nports * sizeof(u64)),
+							GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(ret_arr)) {
+		pr_err("Error: Failed to alloc mem for ret arr %s\n", prop);
+		return NULL;
+	}
+
+	arr = kzalloc(size, GFP_KERNEL);
+	if ((ZERO_OR_NULL_PTR(arr))) {
+		pr_err("Error: Failed to alloc temp mem for %s\n", prop);
+		return NULL;
+	}
+
+	ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+	if (ret) {
+		pr_err("Error in reading property: %s\n", prop);
+		goto err;
+	}
+
+	for (i = 0; i < *nports; i++)
+		ret_arr[i] = (uint64_t)KBTOB(arr[i]);
+
+	MSM_BUS_DBG("%s: num entries %d prop %s", __func__, *nports, prop);
+
+	for (i = 0; i < *nports; i++)
+		MSM_BUS_DBG("Th %d val %llu", i, ret_arr[i]);
+
+	kfree(arr);
+	return ret_arr;
+err:
+	kfree(arr);
+	devm_kfree(&pdev->dev, ret_arr);
+	return NULL;
+}
+
+/*
+ * get_nodes() - Parse all child nodes of a fabric DT node into an array of
+ * msm_bus_node_info entries and accumulate master/slave counts in @pdata.
+ * @of_node: fabric DT node whose children describe bus master/slave nodes
+ * @pdev:    platform device providing devm allocations
+ * @pdata:   fabric registration data; len/nmasters/nslaves are updated here
+ *
+ * Returns the devm-allocated node array, or NULL on any parse/alloc error
+ * (devm memory is reclaimed at device teardown, not freed here).
+ */
+static struct msm_bus_node_info *get_nodes(struct device_node *of_node,
+	struct platform_device *pdev,
+	struct msm_bus_fabric_registration *pdata)
+{
+	struct msm_bus_node_info *info;
+	struct device_node *child_node = NULL;
+	int i = 0, ret;
+	int num_bw = 0;
+	u32 temp;
+
+	/* First pass: count children to size the info array. */
+	for_each_child_of_node(of_node, child_node) {
+		i++;
+	}
+
+	pdata->len = i;
+	info = devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_node_info) *
+			pdata->len, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(info)) {
+		pr_err("Failed to alloc memory for nodes: %d\n", pdata->len);
+		goto err;
+	}
+
+	/* Second pass: populate one info entry per child node. */
+	i = 0;
+	child_node = NULL;
+	for_each_child_of_node(of_node, child_node) {
+		const char *sel_str;
+
+		/* A missing label is tolerated; a missing cell-id is fatal. */
+		ret = of_property_read_string(child_node, "label",
+			&info[i].name);
+		if (ret)
+			pr_err("Error reading node label\n");
+
+		ret = of_property_read_u32(child_node, "cell-id", &info[i].id);
+		if (ret) {
+			pr_err("Error reading node id\n");
+			goto err;
+		}
+
+		if (of_property_read_bool(child_node, "qcom,gateway"))
+			info[i].gateway = 1;
+
+		of_property_read_u32(child_node, "qcom,mas-hw-id",
+			&info[i].mas_hw_id);
+
+		of_property_read_u32(child_node, "qcom,slv-hw-id",
+			&info[i].slv_hw_id);
+		info[i].masterp = get_arr(pdev, child_node,
+					"qcom,masterp", &info[i].num_mports);
+		/* No need to store number of qports */
+		info[i].qport = get_arr(pdev, child_node,
+					"qcom,qport", &ret);
+		pdata->nmasters += info[i].num_mports;
+
+
+		info[i].slavep = get_arr(pdev, child_node,
+					"qcom,slavep", &info[i].num_sports);
+		pdata->nslaves += info[i].num_sports;
+
+
+		info[i].tier = get_arr(pdev, child_node,
+					"qcom,tier", &info[i].num_tiers);
+
+		if (of_property_read_bool(child_node, "qcom,ahb"))
+			info[i].ahb = 1;
+
+		ret = of_property_read_string(child_node, "qcom,hw-sel",
+			&sel_str);
+		if (ret)
+			info[i].hw_sel = 0;
+		else {
+			ret = get_num(hw_sel_name, sel_str);
+			if (ret < 0) {
+				pr_err("Invalid hw-sel\n");
+				goto err;
+			}
+
+			info[i].hw_sel = ret;
+		}
+
+		of_property_read_u32(child_node, "qcom,buswidth",
+			&info[i].buswidth);
+		of_property_read_u32(child_node, "qcom,ws", &info[i].ws);
+
+		info[i].dual_conf =
+			of_property_read_bool(child_node, "qcom,dual-conf");
+
+
+		info[i].th = get_th_params(pdev, child_node, "qcom,thresh",
+						&info[i].num_thresh);
+
+		info[i].bimc_bw = get_th_params(pdev, child_node,
+						"qcom,bimc,bw", &num_bw);
+
+		/* thresholds and BIMC bandwidths are parallel arrays; they
+		 * must be the same length for dual-conf handling.
+		 */
+		if (num_bw != info[i].num_thresh) {
+			pr_err("%s:num_bw %d must equal num_thresh %d\n",
+				__func__, num_bw, info[i].num_thresh);
+			pr_err("%s:Err setting up dual conf for %s\n",
+				__func__, info[i].name);
+			goto err;
+		}
+
+		of_property_read_u32(child_node, "qcom,bimc,gp",
+			&info[i].bimc_gp);
+		of_property_read_u32(child_node, "qcom,bimc,thmp",
+			&info[i].bimc_thmp);
+
+		ret = of_property_read_string(child_node, "qcom,mode-thresh",
+			&sel_str);
+		if (ret)
+			info[i].mode_thresh = 0;
+		else {
+			ret = get_num(mode_sel_name, sel_str);
+			if (ret < 0) {
+				pr_err("Unknown mode :%s\n", sel_str);
+				goto err;
+			}
+
+			info[i].mode_thresh = ret;
+			MSM_BUS_DBG("AXI: THreshold mode set: %d\n",
+					info[i].mode_thresh);
+		}
+
+		ret = of_property_read_string(child_node, "qcom,mode",
+				&sel_str);
+
+		if (ret)
+			info[i].mode = 0;
+		else {
+			ret = get_num(mode_sel_name, sel_str);
+			if (ret < 0) {
+				pr_err("Unknown mode :%s\n", sel_str);
+				goto err;
+			}
+
+			info[i].mode = ret;
+		}
+
+		info[i].nr_lim =
+			of_property_read_bool(child_node, "qcom,nr-lim");
+
+		ret = of_property_read_u32(child_node, "qcom,ff",
+							&info[i].ff);
+		if (ret) {
+			pr_debug("fudge factor not present %d\n", info[i].id);
+			info[i].ff = 0;
+		}
+
+		/* floor-bw is specified in KB in DT; stored in bytes. */
+		ret = of_property_read_u32(child_node, "qcom,floor-bw",
+						&temp);
+		if (ret) {
+			pr_debug("fabdev floor bw not present %d\n",
+							info[i].id);
+			info[i].floor_bw = 0;
+		} else {
+			info[i].floor_bw = KBTOB(temp);
+		}
+
+		info[i].rt_mas =
+			of_property_read_bool(child_node, "qcom,rt-mas");
+
+		ret = of_property_read_string(child_node, "qcom,perm-mode",
+			&sel_str);
+		if (ret)
+			info[i].perm_mode = 0;
+		else {
+			ret = get_num(mode_sel_name, sel_str);
+			if (ret < 0)
+				goto err;
+
+			/* perm_mode is a bitmask; store the mode as a bit. */
+			info[i].perm_mode = 1 << ret;
+		}
+
+		of_property_read_u32(child_node, "qcom,prio-lvl",
+			&info[i].prio_lvl);
+		of_property_read_u32(child_node, "qcom,prio-rd",
+			&info[i].prio_rd);
+		of_property_read_u32(child_node, "qcom,prio-wr",
+			&info[i].prio_wr);
+		of_property_read_u32(child_node, "qcom,prio0", &info[i].prio0);
+		of_property_read_u32(child_node, "qcom,prio1", &info[i].prio1);
+		ret = of_property_read_string(child_node, "qcom,slaveclk-dual",
+			&info[i].slaveclk[DUAL_CTX]);
+		if (!ret)
+			pr_debug("Got slaveclk_dual: %s\n",
+				info[i].slaveclk[DUAL_CTX]);
+		else
+			info[i].slaveclk[DUAL_CTX] = NULL;
+
+		ret = of_property_read_string(child_node,
+			"qcom,slaveclk-active", &info[i].slaveclk[ACTIVE_CTX]);
+		if (!ret)
+			pr_debug("Got slaveclk_active\n");
+		else
+			info[i].slaveclk[ACTIVE_CTX] = NULL;
+
+		ret = of_property_read_string(child_node, "qcom,memclk-dual",
+			&info[i].memclk[DUAL_CTX]);
+		if (!ret)
+			pr_debug("Got memclk_dual\n");
+		else
+			info[i].memclk[DUAL_CTX] = NULL;
+
+		ret = of_property_read_string(child_node, "qcom,memclk-active",
+			&info[i].memclk[ACTIVE_CTX]);
+		if (!ret)
+			pr_debug("Got memclk_active\n");
+		else
+			info[i].memclk[ACTIVE_CTX] = NULL;
+
+		ret = of_property_read_string(child_node, "qcom,iface-clk-node",
+			&info[i].iface_clk_node);
+		if (!ret)
+			pr_debug("Got iface_clk_node\n");
+		else
+			info[i].iface_clk_node = NULL;
+
+		pr_debug("Node name: %s\n", info[i].name);
+		i++;
+	}
+
+	pr_debug("Bus %d added: %d masters\n", pdata->id, pdata->nmasters);
+	pr_debug("Bus %d added: %d slaves\n", pdata->id, pdata->nslaves);
+	return info;
+err:
+	return NULL;
+}
+
+/*
+ * msm_bus_of_get_nfab() - Read the optional "qcom,nfab" bus count from the
+ * platform device's DT node and forward it to the board layer.
+ * Defaults to 0 when the property is absent.
+ */
+void msm_bus_of_get_nfab(struct platform_device *pdev,
+		struct msm_bus_fabric_registration *pdata)
+{
+	int num_fab = 0;
+
+	if (!pdev) {
+		pr_err("Error: Null platform device\n");
+		return;
+	}
+
+	if (!of_property_read_u32(pdev->dev.of_node, "qcom,nfab", &num_fab))
+		pr_debug("Fab_of: Read number of buses: %u\n", num_fab);
+
+	msm_bus_board_set_nfab(pdata, num_fab);
+}
+
+/*
+ * msm_bus_of_get_fab_data() - Build a fabric registration structure from the
+ * platform device's DT node.
+ * @pdev: platform device whose of_node describes the fabric
+ *
+ * Mandatory properties: label, cell-id, qcom,ntieredslaves, qcom,hw-sel.
+ * All other properties fall back to defaults when absent.
+ *
+ * Returns a devm-allocated registration structure (with pdata->info filled in
+ * by get_nodes()), or NULL on any error.
+ */
+struct msm_bus_fabric_registration
+	*msm_bus_of_get_fab_data(struct platform_device *pdev)
+{
+	struct device_node *of_node;
+	struct msm_bus_fabric_registration *pdata;
+	int ret = 0;
+	const char *sel_str;
+	u32 temp;
+
+	if (!pdev) {
+		pr_err("Error: Null platform device\n");
+		return NULL;
+	}
+
+	of_node = pdev->dev.of_node;
+	pdata = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_fabric_registration), GFP_KERNEL);
+	if (!pdata)
+		goto err;
+
+	ret = of_property_read_string(of_node, "label", &pdata->name);
+	if (ret) {
+		pr_err("Error: label not found\n");
+		goto err;
+	}
+	pr_debug("Fab_of: Read name: %s\n", pdata->name);
+
+	ret = of_property_read_u32(of_node, "cell-id",
+		&pdata->id);
+	if (ret) {
+		/* Fix: report the property actually being read. */
+		pr_err("Error: cell-id not found\n");
+		goto err;
+	}
+	pr_debug("Fab_of: Read id: %u\n", pdata->id);
+
+	if (of_property_read_bool(of_node, "qcom,ahb"))
+		pdata->ahb = 1;
+
+	/* Clock names for the dual/active contexts are both optional. */
+	ret = of_property_read_string(of_node, "qcom,fabclk-dual",
+		&pdata->fabclk[DUAL_CTX]);
+	if (ret) {
+		pr_debug("fabclk_dual not available\n");
+		pdata->fabclk[DUAL_CTX] = NULL;
+	} else
+		pr_debug("Fab_of: Read clk dual ctx: %s\n",
+			pdata->fabclk[DUAL_CTX]);
+	ret = of_property_read_string(of_node, "qcom,fabclk-active",
+		&pdata->fabclk[ACTIVE_CTX]);
+	if (ret) {
+		pr_debug("Error: fabclk_active not available\n");
+		pdata->fabclk[ACTIVE_CTX] = NULL;
+	} else
+		pr_debug("Fab_of: Read clk act ctx: %s\n",
+			pdata->fabclk[ACTIVE_CTX]);
+
+	ret = of_property_read_u32(of_node, "qcom,ntieredslaves",
+		&pdata->ntieredslaves);
+	if (ret) {
+		pr_err("Error: ntieredslaves not found\n");
+		goto err;
+	}
+
+	ret = of_property_read_u32(of_node, "qcom,qos-freq", &pdata->qos_freq);
+	if (ret)
+		pr_debug("qos_freq not available\n");
+
+	ret = of_property_read_string(of_node, "qcom,hw-sel", &sel_str);
+	if (ret) {
+		pr_err("Error: hw_sel not found\n");
+		goto err;
+	} else {
+		ret = get_num(hw_sel_name, sel_str);
+		if (ret < 0)
+			goto err;
+
+		pdata->hw_sel = ret;
+	}
+
+	if (of_property_read_bool(of_node, "qcom,virt"))
+		pdata->virt = true;
+
+	ret = of_property_read_u32(of_node, "qcom,qos-baseoffset",
+						&pdata->qos_baseoffset);
+	if (ret)
+		pr_debug("%s:qos_baseoffset not available\n", __func__);
+
+	ret = of_property_read_u32(of_node, "qcom,qos-delta",
+						&pdata->qos_delta);
+	if (ret)
+		pr_debug("%s:qos_delta not available\n", __func__);
+
+	if (of_property_read_bool(of_node, "qcom,rpm-en"))
+		pdata->rpm_enabled = 1;
+
+	/* nr-lim threshold is specified in KB in DT; stored in bytes. */
+	ret = of_property_read_u32(of_node, "qcom,nr-lim-thresh",
+						&temp);
+
+	if (ret) {
+		pr_err("nr-lim threshold not specified\n");
+		pdata->nr_lim_thresh = 0;
+	} else {
+		pdata->nr_lim_thresh = KBTOB(temp);
+	}
+
+	ret = of_property_read_u32(of_node, "qcom,eff-fact",
+						&pdata->eff_fact);
+	if (ret) {
+		pr_err("Fab eff-factor not present\n");
+		pdata->eff_fact = 0;
+	}
+
+	pdata->info = get_nodes(of_node, pdev, pdata);
+	return pdata;
+err:
+	return NULL;
+}
+EXPORT_SYMBOL(msm_bus_of_get_fab_data);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
new file mode 100644
index 0000000..bc5e70e
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
@@ -0,0 +1,886 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+
+#define DEFAULT_QOS_FREQ 19200
+#define DEFAULT_UTIL_FACT 100
+#define DEFAULT_VRAIL_COMP 100
+#define DEFAULT_AGG_SCHEME AGG_SCHEME_LEG
+
+/*
+ * get_qos_mode() - Map a QoS mode string from DT to its numeric index in
+ * qos_names[]. Returns -1 (treated as bypass by callers) when @qos_mode is
+ * NULL or not a recognised mode name.
+ */
+static int get_qos_mode(struct platform_device *pdev,
+	struct device_node *node, const char *qos_mode)
+{
+	static char const *qos_names[] = {"fixed", "limiter",
+		"bypass", "regulator"};
+	int idx;
+
+	if (!qos_mode)
+		return -1;
+
+	for (idx = 0; idx < ARRAY_SIZE(qos_names); idx++) {
+		if (!strcmp(qos_mode, qos_names[idx]))
+			return idx;
+	}
+
+	dev_err(&pdev->dev, "Cannot match mode qos %s using Bypass\n",
+			qos_mode);
+	return -1;
+}
+
+/*
+ * get_arr() - Read an integer-array DT property into devm-managed memory.
+ * *nports receives the element count; it is 0 (with a NULL return) when the
+ * property is absent. On read failure the buffer is freed and NULL returned.
+ */
+static int *get_arr(struct platform_device *pdev,
+	struct device_node *node, const char *prop,
+	int *nports)
+{
+	int len = 0;
+	int *vals;
+
+	if (!of_get_property(node, prop, &len)) {
+		dev_dbg(&pdev->dev, "Property %s not available\n", prop);
+		*nports = 0;
+		return NULL;
+	}
+	*nports = len / sizeof(int);
+
+	/* A zero-length property yields ZERO_SIZE_PTR, which is fine here. */
+	vals = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+	if ((len > 0) && ZERO_OR_NULL_PTR(vals))
+		return NULL;
+
+	if (of_property_read_u32_array(node, prop, (u32 *)vals, *nports)) {
+		dev_err(&pdev->dev, "Error in reading property: %s\n", prop);
+		devm_kfree(&pdev->dev, vals);
+		return NULL;
+	}
+
+	return vals;
+}
+
+/*
+ * get_fab_device_info() - Parse fabric-device properties (QoS base address,
+ * offsets, bus type, QoS clock frequency) from @dev_node.
+ *
+ * Mandatory: qcom,base-name (must match a named MEM resource) and
+ * qcom,bus-type. Everything else defaults sensibly.
+ *
+ * Returns a devm-allocated descriptor, or NULL on error.
+ */
+static struct msm_bus_fab_device_type *get_fab_device_info(
+		struct device_node *dev_node,
+		struct platform_device *pdev)
+{
+	struct msm_bus_fab_device_type *fab_dev;
+	/* Fix: of_property_read_* returns a negative errno; keep it signed. */
+	int ret;
+	struct resource *res;
+	const char *base_name;
+
+	fab_dev = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_fab_device_type),
+			GFP_KERNEL);
+	if (!fab_dev)
+		return NULL;
+
+	ret = of_property_read_string(dev_node, "qcom,base-name", &base_name);
+	if (ret) {
+		dev_err(&pdev->dev, "Error: Unable to get base address name\n");
+		goto fab_dev_err;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, base_name);
+	if (!res) {
+		dev_err(&pdev->dev, "Error getting qos base addr %s\n",
+								base_name);
+		goto fab_dev_err;
+	}
+	fab_dev->pqos_base = res->start;
+	fab_dev->qos_range = resource_size(res);
+	fab_dev->bypass_qos_prg = of_property_read_bool(dev_node,
+						"qcom,bypass-qos-prg");
+
+	ret = of_property_read_u32(dev_node, "qcom,base-offset",
+			&fab_dev->base_offset);
+	if (ret)
+		dev_dbg(&pdev->dev, "Bus base offset is missing\n");
+
+	ret = of_property_read_u32(dev_node, "qcom,qos-off",
+			&fab_dev->qos_off);
+	if (ret)
+		dev_dbg(&pdev->dev, "Bus qos off is missing\n");
+
+
+	ret = of_property_read_u32(dev_node, "qcom,bus-type",
+						&fab_dev->bus_type);
+	if (ret) {
+		dev_warn(&pdev->dev, "Bus type is missing\n");
+		goto fab_dev_err;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,qos-freq",
+						&fab_dev->qos_freq);
+	if (ret) {
+		dev_dbg(&pdev->dev, "Bus qos freq is missing\n");
+		fab_dev->qos_freq = DEFAULT_QOS_FREQ;
+	}
+
+	return fab_dev;
+
+fab_dev_err:
+	devm_kfree(&pdev->dev, fab_dev);
+	return NULL;
+}
+
+/*
+ * get_qos_params() - Populate node_info->qos_params from optional DT
+ * properties. Absent numeric properties leave the zero-initialised defaults
+ * in place; an absent/unknown qos-mode maps to -1 (bypass).
+ */
+static void get_qos_params(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_info_type *node_info)
+{
+	const char *qos_mode = NULL;
+	/* Fix: of_property_read_* returns a negative errno; keep it signed. */
+	int ret;
+	unsigned int temp;
+
+	ret = of_property_read_string(dev_node, "qcom,qos-mode", &qos_mode);
+
+	if (ret)
+		node_info->qos_params.mode = -1;
+	else
+		node_info->qos_params.mode = get_qos_mode(pdev, dev_node,
+								qos_mode);
+
+	of_property_read_u32(dev_node, "qcom,prio-lvl",
+					&node_info->qos_params.prio_lvl);
+
+	of_property_read_u32(dev_node, "qcom,prio1",
+					&node_info->qos_params.prio1);
+
+	of_property_read_u32(dev_node, "qcom,prio0",
+					&node_info->qos_params.prio0);
+
+	of_property_read_u32(dev_node, "qcom,reg-prio1",
+					&node_info->qos_params.reg_prio1);
+
+	of_property_read_u32(dev_node, "qcom,reg-prio0",
+					&node_info->qos_params.reg_prio0);
+
+	of_property_read_u32(dev_node, "qcom,prio-rd",
+					&node_info->qos_params.prio_rd);
+
+	of_property_read_u32(dev_node, "qcom,prio-wr",
+					&node_info->qos_params.prio_wr);
+
+	of_property_read_u32(dev_node, "qcom,gp",
+					&node_info->qos_params.gp);
+
+	of_property_read_u32(dev_node, "qcom,thmp",
+					&node_info->qos_params.thmp);
+
+	of_property_read_u32(dev_node, "qcom,ws",
+					&node_info->qos_params.ws);
+
+	/* bw_buffer is specified in KB in DT; stored in bytes. */
+	ret = of_property_read_u32(dev_node, "qcom,bw_buffer", &temp);
+
+	if (ret)
+		node_info->qos_params.bw_buffer = 0;
+	else
+		node_info->qos_params.bw_buffer = KBTOB(temp);
+
+}
+
+/*
+ * msm_bus_of_parse_clk_array() - Allocate and fill a nodeclk array from the
+ * "clock-names" property of @dev_node.
+ * @dev_node:  node listing the clocks (typically a qcom,node-qos-clks child)
+ * @gdsc_node: node searched for matching "<clk>-supply" regulator props
+ * @pdev:      platform device providing devm allocations
+ * @clk_arr:   out: devm-allocated array of nodeclk entries
+ * @num_clks:  out: number of clock names found (0 on alloc failure)
+ * @id:        bus node id, used only for log messages
+ *
+ * Returns 0 on success, a negative count error or -ENOMEM on failure.
+ *
+ * NOTE(review): when of_clk_get_by_name() fails for an entry, idx is not
+ * advanced, so *num_clks can exceed the number of populated entries; the
+ * trailing entries stay zeroed. Presumably callers tolerate NULL clk
+ * handles — confirm before relying on *num_clks.
+ */
+static int msm_bus_of_parse_clk_array(struct device_node *dev_node,
+			struct device_node *gdsc_node,
+			struct platform_device *pdev, struct nodeclk **clk_arr,
+			int *num_clks, int id)
+{
+	int ret = 0;
+	int idx = 0;
+	struct property *prop;
+	const char *clk_name;
+	int clks = 0;
+
+	clks = of_property_count_strings(dev_node, "clock-names");
+	if (clks < 0) {
+		dev_err(&pdev->dev, "No qos clks node %d\n", id);
+		ret = clks;
+		goto exit_of_parse_clk_array;
+	}
+
+	*num_clks = clks;
+	*clk_arr = devm_kzalloc(&pdev->dev,
+			(clks * sizeof(struct nodeclk)), GFP_KERNEL);
+
+	if (!(*clk_arr)) {
+		ret = -ENOMEM;
+		*num_clks = 0;
+		goto exit_of_parse_clk_array;
+	}
+
+	of_property_for_each_string(dev_node, "clock-names", prop, clk_name) {
+		char gdsc_string[MAX_REG_NAME];
+
+		(*clk_arr)[idx].clk = of_clk_get_by_name(dev_node, clk_name);
+
+		if (IS_ERR_OR_NULL((*clk_arr)[idx].clk)) {
+			dev_err(&pdev->dev,
+				"Failed to get clk %s for bus%d\n", clk_name,
+									id);
+			continue;
+		}
+		/* Clocks named "*no-rate*" are enable-only handles. */
+		if (strnstr(clk_name, "no-rate", strlen(clk_name)))
+			(*clk_arr)[idx].enable_only_clk = true;
+
+		scnprintf(gdsc_string, MAX_REG_NAME, "%s-supply", clk_name);
+
+		/* Record the regulator name only if the supply exists. */
+		if (of_find_property(gdsc_node, gdsc_string, NULL))
+			scnprintf((*clk_arr)[idx].reg_name,
+				MAX_REG_NAME, "%s", clk_name);
+		else
+			scnprintf((*clk_arr)[idx].reg_name,
+					MAX_REG_NAME, "%c", '\0');
+
+		idx++;
+	}
+exit_of_parse_clk_array:
+	return ret;
+}
+
+/*
+ * get_agg_params() - Populate node_info->agg_params (bus width, aggregation
+ * ports/scheme, vrail compensation and utilisation levels) from DT.
+ *
+ * For AGG_SCHEME_1 the "qcom,util-levels" property is parsed as pairs of
+ * (threshold-KB, util-factor) u32 cells; otherwise a single optional
+ * "qcom,util-fact" value is used. On parse/alloc failure the scheme falls
+ * back to DEFAULT_AGG_SCHEME with zero util levels.
+ */
+static void get_agg_params(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_info_type *node_info)
+{
+	int ret;
+
+
+	ret = of_property_read_u32(dev_node, "qcom,buswidth",
+					&node_info->agg_params.buswidth);
+	if (ret) {
+		dev_dbg(&pdev->dev, "Using default 8 bytes %d", node_info->id);
+		node_info->agg_params.buswidth = 8;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,agg-ports",
+				   &node_info->agg_params.num_aggports);
+	if (ret)
+		node_info->agg_params.num_aggports = node_info->num_qports;
+
+	/* Fabric devices default to the legacy scheme; leaf nodes to none. */
+	ret = of_property_read_u32(dev_node, "qcom,agg-scheme",
+					&node_info->agg_params.agg_scheme);
+	if (ret) {
+		if (node_info->is_fab_dev)
+			node_info->agg_params.agg_scheme = DEFAULT_AGG_SCHEME;
+		else
+			node_info->agg_params.agg_scheme = AGG_SCHEME_NONE;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,vrail-comp",
+					&node_info->agg_params.vrail_comp);
+	if (ret) {
+		if (node_info->is_fab_dev)
+			node_info->agg_params.vrail_comp = DEFAULT_VRAIL_COMP;
+		else
+			node_info->agg_params.vrail_comp = 0;
+	}
+
+	if (node_info->agg_params.agg_scheme == AGG_SCHEME_1) {
+		uint32_t len = 0;
+		const uint32_t *util_levels;
+		int i, index = 0;
+
+		/* Raw property data is big-endian; each level is 2 cells. */
+		util_levels =
+			of_get_property(dev_node, "qcom,util-levels", &len);
+		if (!util_levels)
+			goto err_get_agg_params;
+
+		node_info->agg_params.num_util_levels =
+					len / (sizeof(uint32_t) * 2);
+		node_info->agg_params.util_levels = devm_kzalloc(&pdev->dev,
+			(node_info->agg_params.num_util_levels *
+			sizeof(struct node_util_levels_type)), GFP_KERNEL);
+
+		if (IS_ERR_OR_NULL(node_info->agg_params.util_levels))
+			goto err_get_agg_params;
+
+		for (i = 0; i < node_info->agg_params.num_util_levels; i++) {
+			node_info->agg_params.util_levels[i].threshold =
+				KBTOB(be32_to_cpu(util_levels[index++]));
+			node_info->agg_params.util_levels[i].util_fact =
+					be32_to_cpu(util_levels[index++]);
+			dev_dbg(&pdev->dev, "[%d]:Thresh:%llu util_fact:%d\n",
+				i,
+				node_info->agg_params.util_levels[i].threshold,
+				node_info->agg_params.util_levels[i].util_fact);
+		}
+	} else {
+		uint32_t util_fact;
+
+		ret = of_property_read_u32(dev_node, "qcom,util-fact",
+								&util_fact);
+		if (ret) {
+			if (node_info->is_fab_dev)
+				util_fact = DEFAULT_UTIL_FACT;
+			else
+				util_fact = 0;
+		}
+
+		/* A non-zero factor becomes a single threshold-less level. */
+		if (util_fact) {
+			node_info->agg_params.num_util_levels = 1;
+			node_info->agg_params.util_levels =
+			devm_kzalloc(&pdev->dev,
+				(node_info->agg_params.num_util_levels *
+				sizeof(struct node_util_levels_type)),
+				GFP_KERNEL);
+			if (IS_ERR_OR_NULL(node_info->agg_params.util_levels))
+				goto err_get_agg_params;
+			node_info->agg_params.util_levels[0].util_fact =
+								util_fact;
+		}
+
+	}
+
+	return;
+err_get_agg_params:
+	node_info->agg_params.num_util_levels = 0;
+	node_info->agg_params.agg_scheme = DEFAULT_AGG_SCHEME;
+}
+
+/*
+ * get_node_info_data() - Parse one bus node's DT description into a
+ * devm-allocated msm_bus_node_info_type.
+ *
+ * Mandatory: cell-id and label. Connection and blacklist phandle lists are
+ * resolved to cell-ids. Returns NULL on any error (devm memory for the
+ * descriptor itself is released; sub-allocations are reclaimed at device
+ * teardown).
+ */
+static struct msm_bus_node_info_type *get_node_info_data(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev)
+{
+	struct msm_bus_node_info_type *node_info;
+	/* of_property_read_* returns a negative errno; keep it signed. */
+	int ret;
+	int size;
+	int i;
+	struct device_node *con_node;
+	struct device_node *bus_dev;
+
+	node_info = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_node_info_type),
+			GFP_KERNEL);
+	if (!node_info) {
+		dev_err(&pdev->dev,
+			"Error: Unable to allocate memory for node_info\n");
+		return NULL;
+	}
+
+	ret = of_property_read_u32(dev_node, "cell-id", &node_info->id);
+	if (ret) {
+		dev_warn(&pdev->dev, "Bus node is missing cell-id\n");
+		goto node_info_err;
+	}
+	ret = of_property_read_string(dev_node, "label", &node_info->name);
+	if (ret) {
+		dev_warn(&pdev->dev, "Bus node is missing name\n");
+		goto node_info_err;
+	}
+	node_info->qport = get_arr(pdev, dev_node, "qcom,qport",
+			&node_info->num_qports);
+
+	if (of_get_property(dev_node, "qcom,connections", &size)) {
+		node_info->num_connections = size / sizeof(int);
+		node_info->connections = devm_kzalloc(&pdev->dev, size,
+				GFP_KERNEL);
+		/* Fix: check the allocation before the loop writes to it. */
+		if (!node_info->connections)
+			goto node_info_err;
+	} else {
+		node_info->num_connections = 0;
+		node_info->connections = 0;
+	}
+
+	for (i = 0; i < node_info->num_connections; i++) {
+		con_node = of_parse_phandle(dev_node, "qcom,connections", i);
+		if (IS_ERR_OR_NULL(con_node))
+			goto node_info_err;
+
+		if (of_property_read_u32(con_node, "cell-id",
+				&node_info->connections[i])) {
+			/* Fix: drop the node reference on the error path. */
+			of_node_put(con_node);
+			goto node_info_err;
+		}
+		of_node_put(con_node);
+	}
+
+	if (of_get_property(dev_node, "qcom,blacklist", &size)) {
+		node_info->num_blist = size/sizeof(u32);
+		node_info->bl_cons = devm_kzalloc(&pdev->dev,
+				size, GFP_KERNEL);
+		/* Fix: check the allocation before the loop writes to it. */
+		if (!node_info->bl_cons)
+			goto node_info_err;
+	} else {
+		node_info->num_blist = 0;
+		node_info->bl_cons = 0;
+	}
+
+	for (i = 0; i < node_info->num_blist; i++) {
+		con_node = of_parse_phandle(dev_node, "qcom,blacklist", i);
+		if (IS_ERR_OR_NULL(con_node))
+			goto node_info_err;
+
+		if (of_property_read_u32(con_node, "cell-id",
+				&node_info->bl_cons[i])) {
+			/* Fix: drop the node reference on the error path. */
+			of_node_put(con_node);
+			goto node_info_err;
+		}
+		of_node_put(con_node);
+	}
+
+	bus_dev = of_parse_phandle(dev_node, "qcom,bus-dev", 0);
+	if (!IS_ERR_OR_NULL(bus_dev)) {
+		if (of_property_read_u32(bus_dev, "cell-id",
+			&node_info->bus_device_id)) {
+			dev_err(&pdev->dev, "Can't find bus device. Node %d\n",
+					node_info->id);
+			/* Fix: drop the node reference on the error path. */
+			of_node_put(bus_dev);
+			goto node_info_err;
+		}
+
+		of_node_put(bus_dev);
+	} else {
+		dev_dbg(&pdev->dev, "Can't find bdev phandle for %d",
+					node_info->id);
+	}
+
+	node_info->is_fab_dev = of_property_read_bool(dev_node, "qcom,fab-dev");
+	node_info->virt_dev = of_property_read_bool(dev_node, "qcom,virt-dev");
+
+
+	/* RPM ids are optional; -1 means "no RPM resource". */
+	ret = of_property_read_u32(dev_node, "qcom,mas-rpm-id",
+						&node_info->mas_rpm_id);
+	if (ret) {
+		dev_dbg(&pdev->dev, "mas rpm id is missing\n");
+		node_info->mas_rpm_id = -1;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,slv-rpm-id",
+						&node_info->slv_rpm_id);
+	if (ret) {
+		dev_dbg(&pdev->dev, "slv rpm id is missing\n");
+		node_info->slv_rpm_id = -1;
+	}
+
+	get_agg_params(dev_node, pdev, node_info);
+	get_qos_params(dev_node, pdev, node_info);
+
+	return node_info;
+
+node_info_err:
+	devm_kfree(&pdev->dev, node_info);
+	return NULL;
+}
+
+/*
+ * get_bus_node_device_data() - Fill one msm_bus_node_device_type from its DT
+ * node: node info, fabric info (for fabric devices), bus/QoS clock handles
+ * and matching gdsc regulator names.
+ *
+ * Fabric devices require bus_clk/bus_a_clk handles; leaf nodes treat all
+ * clocks as optional. Returns 0 on success or a negative errno.
+ */
+static int get_bus_node_device_data(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_device_type * const node_device)
+{
+	bool enable_only;
+	bool setrate_only;
+	struct device_node *qos_clk_node;
+
+	node_device->node_info = get_node_info_data(dev_node, pdev);
+	if (IS_ERR_OR_NULL(node_device->node_info)) {
+		dev_err(&pdev->dev, "Error: Node info missing\n");
+		return -ENODATA;
+	}
+	node_device->ap_owned = of_property_read_bool(dev_node,
+							"qcom,ap-owned");
+
+	if (node_device->node_info->is_fab_dev) {
+		dev_dbg(&pdev->dev, "Dev %d\n", node_device->node_info->id);
+
+		/* Virtual fabrics have no QoS registers, hence no fabdev. */
+		if (!node_device->node_info->virt_dev) {
+			node_device->fabdev =
+				get_fab_device_info(dev_node, pdev);
+			if (IS_ERR_OR_NULL(node_device->fabdev)) {
+				dev_err(&pdev->dev,
+					"Error: Fabric device info missing\n");
+				devm_kfree(&pdev->dev, node_device->node_info);
+				return -ENODATA;
+			}
+		}
+
+		enable_only = of_property_read_bool(dev_node,
+							"qcom,enable-only-clk");
+		node_device->clk[DUAL_CTX].enable_only_clk = enable_only;
+		node_device->clk[ACTIVE_CTX].enable_only_clk = enable_only;
+
+		/*
+		 * Doesn't make sense to have a clk handle you can't enable or
+		 * set rate on.
+		 */
+		if (!enable_only) {
+			setrate_only = of_property_read_bool(dev_node,
+						"qcom,setrate-only-clk");
+			node_device->clk[DUAL_CTX].setrate_only_clk =
+								setrate_only;
+			node_device->clk[ACTIVE_CTX].setrate_only_clk =
+								setrate_only;
+		}
+
+		node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node,
+							"bus_clk");
+
+		if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk)) {
+			int ret;
+
+			dev_err(&pdev->dev,
+				"%s:Failed to get bus clk for bus%d ctx%d\n",
+				__func__, node_device->node_info->id,
+								DUAL_CTX);
+			ret = (IS_ERR(node_device->clk[DUAL_CTX].clk) ?
+			PTR_ERR(node_device->clk[DUAL_CTX].clk) : -ENXIO);
+			return ret;
+		}
+
+		if (of_find_property(dev_node, "bus-gdsc-supply", NULL))
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%s", "bus-gdsc");
+		else
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+		node_device->clk[ACTIVE_CTX].clk = of_clk_get_by_name(dev_node,
+							"bus_a_clk");
+		if (IS_ERR_OR_NULL(node_device->clk[ACTIVE_CTX].clk)) {
+			int ret;
+
+			dev_err(&pdev->dev,
+				"Failed to get bus clk for bus%d ctx%d\n",
+				node_device->node_info->id, ACTIVE_CTX);
+			ret = (IS_ERR(node_device->clk[DUAL_CTX].clk) ?
+			PTR_ERR(node_device->clk[DUAL_CTX].clk) : -ENXIO);
+			return ret;
+		}
+
+		if (of_find_property(dev_node, "bus-a-gdsc-supply", NULL))
+			scnprintf(node_device->clk[ACTIVE_CTX].reg_name,
+				MAX_REG_NAME, "%s", "bus-a-gdsc");
+		else
+			scnprintf(node_device->clk[ACTIVE_CTX].reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+		node_device->bus_qos_clk.clk = of_clk_get_by_name(dev_node,
+							"bus_qos_clk");
+
+		if (IS_ERR_OR_NULL(node_device->bus_qos_clk.clk)) {
+			dev_dbg(&pdev->dev,
+				"%s:Failed to get bus qos clk for %d",
+				__func__, node_device->node_info->id);
+			scnprintf(node_device->bus_qos_clk.reg_name,
+					MAX_REG_NAME, "%c", '\0');
+		} else {
+			if (of_find_property(dev_node, "bus-qos-gdsc-supply",
+								NULL))
+				scnprintf(node_device->bus_qos_clk.reg_name,
+					MAX_REG_NAME, "%s", "bus-qos-gdsc");
+			else
+				scnprintf(node_device->bus_qos_clk.reg_name,
+					MAX_REG_NAME, "%c", '\0');
+		}
+
+		qos_clk_node = of_get_child_by_name(dev_node,
+						"qcom,node-qos-clks");
+
+		if (qos_clk_node) {
+			if (msm_bus_of_parse_clk_array(qos_clk_node, dev_node,
+						pdev,
+						&node_device->node_qos_clks,
+						&node_device->num_node_qos_clks,
+						node_device->node_info->id)) {
+				dev_info(&pdev->dev,
+					"Bypass QoS programming\n");
+				/* Fix: fabdev is NULL for virtual fabric
+				 * devices; guard against a NULL dereference.
+				 */
+				if (node_device->fabdev)
+					node_device->fabdev->bypass_qos_prg =
+									true;
+			}
+			of_node_put(qos_clk_node);
+		}
+	} else {
+		node_device->bus_qos_clk.clk = of_clk_get_by_name(dev_node,
+							"bus_qos_clk");
+
+		if (IS_ERR_OR_NULL(node_device->bus_qos_clk.clk))
+			dev_dbg(&pdev->dev,
+				"%s:Failed to get bus qos clk for mas%d",
+				__func__, node_device->node_info->id);
+
+		if (of_find_property(dev_node, "bus-qos-gdsc-supply",
+							NULL))
+			scnprintf(node_device->bus_qos_clk.reg_name,
+				MAX_REG_NAME, "%s", "bus-qos-gdsc");
+		else
+			scnprintf(node_device->bus_qos_clk.reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+		enable_only = of_property_read_bool(dev_node,
+							"qcom,enable-only-clk");
+		node_device->clk[DUAL_CTX].enable_only_clk = enable_only;
+		node_device->bus_qos_clk.enable_only_clk = enable_only;
+
+		/*
+		 * Doesn't make sense to have a clk handle you can't enable or
+		 * set rate on.
+		 */
+		if (!enable_only) {
+			setrate_only = of_property_read_bool(dev_node,
+						"qcom,setrate-only-clk");
+			node_device->clk[DUAL_CTX].setrate_only_clk =
+								setrate_only;
+			node_device->clk[ACTIVE_CTX].setrate_only_clk =
+								setrate_only;
+		}
+
+		qos_clk_node = of_get_child_by_name(dev_node,
+						"qcom,node-qos-clks");
+
+		if (qos_clk_node) {
+			if (msm_bus_of_parse_clk_array(qos_clk_node, dev_node,
+						pdev,
+						&node_device->node_qos_clks,
+						&node_device->num_node_qos_clks,
+						node_device->node_info->id)) {
+				dev_info(&pdev->dev,
+					"Bypass QoS programming\n");
+				/* Fix: non-fabric nodes never have a fabdev;
+				 * the unconditional write here was a
+				 * guaranteed NULL dereference.
+				 */
+				if (node_device->fabdev)
+					node_device->fabdev->bypass_qos_prg =
+									true;
+			}
+			of_node_put(qos_clk_node);
+		}
+
+		node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node,
+							"node_clk");
+
+		if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk))
+			dev_dbg(&pdev->dev,
+				"%s:Failed to get bus clk for bus%d ctx%d",
+				__func__, node_device->node_info->id,
+								DUAL_CTX);
+
+		if (of_find_property(dev_node, "node-gdsc-supply", NULL))
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%s", "node-gdsc");
+		else
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+	}
+	return 0;
+}
+
+/*
+ * msm_bus_of_to_pdata() - Build the full bus-node registration array from
+ * the platform device's DT children.
+ *
+ * Returns a devm-allocated registration structure with one entry per child
+ * node, or NULL on any allocation/parse failure.
+ */
+struct msm_bus_device_node_registration
+	*msm_bus_of_to_pdata(struct platform_device *pdev)
+{
+	struct device_node *of_node, *child_node;
+	struct msm_bus_device_node_registration *pdata;
+	unsigned int i = 0, j;
+	/* Fix: get_bus_node_device_data() returns a negative errno. */
+	int ret;
+
+	if (!pdev) {
+		pr_err("Error: Null platform device\n");
+		return NULL;
+	}
+
+	of_node = pdev->dev.of_node;
+
+	pdata = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_device_node_registration),
+			GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	pdata->num_devices = of_get_child_count(of_node);
+
+	pdata->info = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_node_device_type) *
+			pdata->num_devices, GFP_KERNEL);
+
+	if (!pdata->info)
+		goto node_reg_err;
+
+	for_each_child_of_node(of_node, child_node) {
+		ret = get_bus_node_device_data(child_node, pdev,
+				&pdata->info[i]);
+		if (ret) {
+			dev_err(&pdev->dev, "Error: unable to initialize bus nodes\n");
+			/* Fix: drop the iterator's reference when leaving the
+			 * for_each_child_of_node() loop early.
+			 */
+			of_node_put(child_node);
+			goto node_reg_err_1;
+		}
+		pdata->info[i].of_node = child_node;
+		i++;
+	}
+
+	/* Dump the parsed topology for debugging. */
+	dev_dbg(&pdev->dev, "bus topology:\n");
+	for (i = 0; i < pdata->num_devices; i++) {
+		dev_dbg(&pdev->dev, "id %d\nnum_qports %d\nnum_connections %d",
+			pdata->info[i].node_info->id,
+			pdata->info[i].node_info->num_qports,
+			pdata->info[i].node_info->num_connections);
+		dev_dbg(&pdev->dev, "\nbus_device_id %d\n buswidth %d\n",
+			pdata->info[i].node_info->bus_device_id,
+			pdata->info[i].node_info->agg_params.buswidth);
+		for (j = 0; j < pdata->info[i].node_info->num_connections;
+									j++) {
+			dev_dbg(&pdev->dev, "connection[%d]: %d\n", j,
+				pdata->info[i].node_info->connections[j]);
+		}
+		for (j = 0; j < pdata->info[i].node_info->num_blist;
+									 j++) {
+			dev_dbg(&pdev->dev, "black_listed_node[%d]: %d\n", j,
+				pdata->info[i].node_info->bl_cons[j]);
+		}
+		if (pdata->info[i].fabdev)
+			dev_dbg(&pdev->dev, "base_addr %zu\nbus_type %d\n",
+					(size_t)pdata->info[i].fabdev->pqos_base,
+					pdata->info[i].fabdev->bus_type);
+	}
+	return pdata;
+
+node_reg_err_1:
+	devm_kfree(&pdev->dev, pdata->info);
+node_reg_err:
+	devm_kfree(&pdev->dev, pdata);
+	return NULL;
+}
+
+/*
+ * msm_bus_of_get_ids() - Resolve a phandle-list property into an array of
+ * cell-ids.
+ * @pdev:      platform device providing devm allocations
+ * @dev_node:  node carrying @prop_name
+ * @dev_ids:   out: devm-allocated id array (NULL on failure)
+ * @num_ids:   out: number of phandles in @prop_name
+ * @prop_name: name of the phandle-list property
+ *
+ * Returns 0 on success, -ENOMEM or -ENXIO on failure.
+ */
+static int msm_bus_of_get_ids(struct platform_device *pdev,
+			struct device_node *dev_node, int **dev_ids,
+			int *num_ids, char *prop_name)
+{
+	int size, i;
+	struct device_node *rule_node;
+	int *ids = NULL;
+
+	if (!of_get_property(dev_node, prop_name, &size)) {
+		dev_err(&pdev->dev, "No rule nodes, skipping node\n");
+		return -ENXIO;
+	}
+
+	*num_ids = size / sizeof(int);
+	ids = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	/* Fix: the allocation was never checked; the fill loop below would
+	 * dereference NULL on OOM.
+	 */
+	if (!ids) {
+		*dev_ids = NULL;
+		return -ENOMEM;
+	}
+
+	*dev_ids = ids;
+	for (i = 0; i < *num_ids; i++) {
+		rule_node = of_parse_phandle(dev_node, prop_name, i);
+		if (IS_ERR_OR_NULL(rule_node)) {
+			dev_err(&pdev->dev, "Can't get rule node id\n");
+			/* Fix: do not of_node_put() an ERR_PTR/NULL node. */
+			goto err_get_ids;
+		}
+
+		if (of_property_read_u32(rule_node, "cell-id",
+				&ids[i])) {
+			dev_err(&pdev->dev, "Can't get rule node id\n");
+			of_node_put(rule_node);
+			goto err_get_ids;
+		}
+		of_node_put(rule_node);
+	}
+	return 0;
+err_get_ids:
+	devm_kfree(&pdev->dev, ids);
+	/* Fix: don't leave the caller holding a dangling pointer. */
+	*dev_ids = NULL;
+	return -ENXIO;
+}
+
+/*
+ * msm_bus_of_get_static_rules() - Parse all static bandwidth rules from the
+ * platform device's DT children into *static_rules.
+ *
+ * Returns the number of rules parsed on success, or a negative errno with
+ * *static_rules set to NULL on failure.
+ */
+int msm_bus_of_get_static_rules(struct platform_device *pdev,
+					struct bus_rule_type **static_rules)
+{
+	int ret = 0;
+	struct device_node *of_node, *child_node;
+	int num_rules = 0;
+	int rule_idx = 0;
+	int bw_fld = 0;
+	int i;
+	struct bus_rule_type *local_rule = NULL;
+
+	of_node = pdev->dev.of_node;
+	num_rules = of_get_child_count(of_node);
+	local_rule = devm_kzalloc(&pdev->dev,
+				sizeof(struct bus_rule_type) * num_rules,
+				GFP_KERNEL);
+
+	if (IS_ERR_OR_NULL(local_rule)) {
+		ret = -ENOMEM;
+		goto exit_static_rules;
+	}
+
+	*static_rules = local_rule;
+	for_each_child_of_node(of_node, child_node) {
+		/* Source/destination lists are optional; failures leave the
+		 * id arrays NULL, which the cleanup path tolerates.
+		 */
+		ret = msm_bus_of_get_ids(pdev, child_node,
+					&local_rule[rule_idx].src_id,
+					&local_rule[rule_idx].num_src,
+					"qcom,src-nodes");
+
+		ret = msm_bus_of_get_ids(pdev, child_node,
+					&local_rule[rule_idx].dst_node,
+					&local_rule[rule_idx].num_dst,
+					"qcom,dest-node");
+
+		ret = of_property_read_u32(child_node, "qcom,src-field",
+				&local_rule[rule_idx].src_field);
+		if (ret) {
+			dev_err(&pdev->dev, "src-field missing\n");
+			ret = -ENXIO;
+			of_node_put(child_node);
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,src-op",
+				&local_rule[rule_idx].op);
+		if (ret) {
+			dev_err(&pdev->dev, "src-op missing\n");
+			ret = -ENXIO;
+			of_node_put(child_node);
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,mode",
+				&local_rule[rule_idx].mode);
+		if (ret) {
+			dev_err(&pdev->dev, "mode missing\n");
+			ret = -ENXIO;
+			of_node_put(child_node);
+			goto err_static_rules;
+		}
+
+		/* thresh and dest-bw are specified in KB; stored in bytes. */
+		ret = of_property_read_u32(child_node, "qcom,thresh", &bw_fld);
+		if (ret) {
+			dev_err(&pdev->dev, "thresh missing\n");
+			ret = -ENXIO;
+			of_node_put(child_node);
+			goto err_static_rules;
+		} else
+			local_rule[rule_idx].thresh = KBTOB(bw_fld);
+
+		ret = of_property_read_u32(child_node, "qcom,dest-bw",
+								&bw_fld);
+		if (ret)
+			local_rule[rule_idx].dst_bw = 0;
+		else
+			local_rule[rule_idx].dst_bw = KBTOB(bw_fld);
+
+		rule_idx++;
+	}
+	ret = rule_idx;
+exit_static_rules:
+	return ret;
+err_static_rules:
+	for (i = 0; i < num_rules; i++) {
+		if (!IS_ERR_OR_NULL(local_rule[i].src_id))
+			devm_kfree(&pdev->dev,
+					local_rule[i].src_id);
+		if (!IS_ERR_OR_NULL(local_rule[i].dst_node))
+			devm_kfree(&pdev->dev,
+					local_rule[i].dst_node);
+	}
+	/* Fix: free the rule array once, outside the loop — the original
+	 * devm_kfree()d it on every iteration, double-freeing it.
+	 */
+	devm_kfree(&pdev->dev, local_rule);
+	*static_rules = NULL;
+	return ret;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
new file mode 100644
index 0000000..3a4b770
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+#include "msm_bus_rpmh.h"
+
+#define DEFAULT_QOS_FREQ 19200
+#define DEFAULT_UTIL_FACT 100
+#define DEFAULT_VRAIL_COMP 100
+#define DEFAULT_AGG_SCHEME AGG_SCHEME_LEG
+
+/*
+ * get_arr() - Read a u32-array DT property into a devm-allocated buffer.
+ * Returns the array (and element count via @nports) or NULL when the
+ * property is absent or cannot be read.
+ */
+static int *get_arr(struct platform_device *pdev,
+	struct device_node *node, const char *prop,
+	int *nports)
+{
+	int len = 0;
+	int *vals;
+
+	/* Probe the property first to learn its byte length. */
+	if (!of_get_property(node, prop, &len)) {
+		dev_dbg(&pdev->dev, "Property %s not available\n", prop);
+		*nports = 0;
+		return NULL;
+	}
+	*nports = len / sizeof(int);
+
+	vals = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(vals)) {
+		dev_err(&pdev->dev, "Error: Failed to alloc mem for %s\n",
+			prop);
+		return NULL;
+	}
+
+	if (of_property_read_u32_array(node, prop, (u32 *)vals, *nports)) {
+		dev_err(&pdev->dev, "Error in reading property: %s\n", prop);
+		devm_kfree(&pdev->dev, vals);
+		return NULL;
+	}
+
+	return vals;
+}
+
+/*
+ * get_rsc_device_info() - Parse RSC-specific DT properties for a node.
+ * Returns a devm-allocated descriptor, or NULL on allocation failure.
+ */
+static struct msm_bus_rsc_device_type *get_rsc_device_info(
+			struct device_node *dev_node,
+			struct platform_device *pdev)
+{
+	struct msm_bus_rsc_device_type *rscdev;
+	int ret;
+
+	rscdev = devm_kzalloc(&pdev->dev, sizeof(*rscdev), GFP_KERNEL);
+	if (!rscdev)
+		return NULL;
+
+	ret = of_property_read_u32(dev_node, "qcom,req_state",
+						&rscdev->req_state);
+	if (ret) {
+		/* Optional property; fall back to the default state. */
+		dev_dbg(&pdev->dev, "req_state missing, using default\n");
+		rscdev->req_state = 2;
+	}
+
+	return rscdev;
+}
+
+/*
+ * get_bcm_device_info() - Parse BCM-specific DT properties for a node.
+ * "qcom,bcm-name" is mandatory; on any failure the devm allocation is
+ * released and NULL is returned.
+ */
+static struct msm_bus_bcm_device_type *get_bcm_device_info(
+			struct device_node *dev_node,
+			struct platform_device *pdev)
+{
+	struct msm_bus_bcm_device_type *bcmdev;
+
+	bcmdev = devm_kzalloc(&pdev->dev, sizeof(*bcmdev), GFP_KERNEL);
+	if (!bcmdev)
+		return NULL;
+
+	/* A BCM node without a name is unusable; undo the allocation. */
+	if (of_property_read_string(dev_node, "qcom,bcm-name",
+						&bcmdev->name)) {
+		devm_kfree(&pdev->dev, bcmdev);
+		return NULL;
+	}
+
+	return bcmdev;
+}
+
+/*
+ * get_fab_device_info() - Parse fabric-specific DT properties for a node.
+ *
+ * "qcom,base-name" is mandatory and must name a MEM resource of @pdev
+ * (the fabric's QoS register window).  The remaining properties are
+ * optional and fall back to defaults with a debug/warn message.
+ * Returns a devm-allocated descriptor, or NULL on failure (the partial
+ * allocation is released in that case).
+ */
+static struct msm_bus_fab_device_type *get_fab_device_info(
+		struct device_node *dev_node,
+		struct platform_device *pdev)
+{
+	struct msm_bus_fab_device_type *fab_dev;
+	struct resource *res;
+	const char *base_name;
+
+	fab_dev = devm_kzalloc(&pdev->dev, sizeof(*fab_dev),
+			GFP_KERNEL);
+	if (!fab_dev)
+		return NULL;
+
+	if (of_property_read_string(dev_node, "qcom,base-name", &base_name)) {
+		devm_kfree(&pdev->dev, fab_dev);
+		return NULL;
+	}
+
+	/* Resolve the QoS register window by its named MEM resource. */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, base_name);
+	if (!res) {
+		devm_kfree(&pdev->dev, fab_dev);
+		return NULL;
+	}
+	fab_dev->pqos_base = res->start;
+	fab_dev->qos_range = resource_size(res);
+	fab_dev->bypass_qos_prg = of_property_read_bool(dev_node,
+						"qcom,bypass-qos-prg");
+
+	if (of_property_read_u32(dev_node, "qcom,base-offset",
+						&fab_dev->base_offset))
+		dev_dbg(&pdev->dev, "Bus base offset is missing\n");
+
+	if (of_property_read_u32(dev_node, "qcom,sbm-offset",
+						&fab_dev->sbm_offset))
+		dev_dbg(&pdev->dev, "sbm disable offset is missing\n");
+
+	if (of_property_read_u32(dev_node, "qcom,qos-off",
+						&fab_dev->qos_off))
+		dev_dbg(&pdev->dev, "Bus qos off is missing\n");
+
+	if (of_property_read_u32(dev_node, "qcom,bus-type",
+						&fab_dev->bus_type))
+		dev_warn(&pdev->dev, "Bus type is missing\n");
+
+	if (of_property_read_u32(dev_node, "qcom,qos-freq",
+						&fab_dev->qos_freq)) {
+		dev_dbg(&pdev->dev, "Bus qos freq is missing\n");
+		fab_dev->qos_freq = DEFAULT_QOS_FREQ;
+	}
+
+	return fab_dev;
+}
+
+/*
+ * get_qos_params() - Fill a node's QoS parameters from optional DT props.
+ *
+ * All properties are optional; absent ones leave the zero-initialized
+ * defaults in place.  Multi-cell properties are read raw with
+ * of_get_property(), which returns big-endian cells — hence the
+ * be32_to_cpu() on every element.
+ */
+static void get_qos_params(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_info_type *node_info)
+{
+	const uint32_t *vec_arr;
+	int len;
+
+	of_property_read_u32(dev_node, "qcom,prio",
+					&node_info->qos_params.prio_dflt);
+
+	/* Limiter: exactly two cells — <bw sat>. */
+	vec_arr = of_get_property(dev_node, "qcom,lim-params", &len);
+	if (vec_arr != NULL && len == sizeof(uint32_t) * 2) {
+		node_info->qos_params.limiter.bw = be32_to_cpu(vec_arr[0]);
+		node_info->qos_params.limiter.sat = be32_to_cpu(vec_arr[1]);
+	}
+
+	node_info->qos_params.limiter_en = of_property_read_bool(dev_node,
+						"qcom,lim-en");
+
+	/* Regulator: exactly four cells — <low_prio hi_prio bw sat>. */
+	vec_arr = of_get_property(dev_node, "qcom,qos-reg-params", &len);
+	if (vec_arr != NULL && len == sizeof(uint32_t) * 4) {
+		node_info->qos_params.reg.low_prio = be32_to_cpu(vec_arr[0]);
+		node_info->qos_params.reg.hi_prio = be32_to_cpu(vec_arr[1]);
+		node_info->qos_params.reg.bw = be32_to_cpu(vec_arr[2]);
+		node_info->qos_params.reg.sat = be32_to_cpu(vec_arr[3]);
+	}
+
+	/* Regulator mode: exactly two cells — <read write>. */
+	vec_arr = of_get_property(dev_node, "qcom,qos-reg-mode", &len);
+	if (vec_arr != NULL && len == sizeof(uint32_t) * 2) {
+		node_info->qos_params.reg_mode.read = be32_to_cpu(vec_arr[0]);
+		node_info->qos_params.reg_mode.write = be32_to_cpu(vec_arr[1]);
+	}
+
+	node_info->qos_params.urg_fwd_en = of_property_read_bool(dev_node,
+						"qcom,forwarding");
+
+	node_info->qos_params.defer_init_qos = of_property_read_bool(dev_node,
+						"qcom,defer-init-qos");
+}
+
+/*
+ * msm_bus_of_parse_clk_array() - Build a nodeclk array from "clock-names".
+ * @dev_node: node carrying "clock-names" and the clock phandles
+ * @gdsc_node: node checked for matching "<clk>-supply" regulator props
+ * @clk_arr: out: devm-allocated array of clock handles
+ * @num_clks: out: number of valid entries (zeroed again on clk-get failure)
+ *
+ * NOTE(review): when of_clk_get_by_name() fails mid-loop this returns 0
+ * (ret is never set negative) with *num_clks forced to 0.  The caller in
+ * get_bus_node_device_data() treats a non-zero return as "bypass QoS", so
+ * changing this to an error code would change behavior — confirm intent
+ * before touching it.
+ */
+static int msm_bus_of_parse_clk_array(struct device_node *dev_node,
+		struct device_node *gdsc_node,
+		struct platform_device *pdev, struct nodeclk **clk_arr,
+		int *num_clks, int id)
+{
+	int ret = 0;
+	int idx = 0;
+	struct property *prop;
+	const char *clk_name;
+	int clks = 0;
+
+	clks = of_property_count_strings(dev_node, "clock-names");
+	if (clks < 0)
+		return clks;
+
+	*num_clks = clks;
+	*clk_arr = devm_kcalloc(&pdev->dev, clks, sizeof(*(*clk_arr)),
+			GFP_KERNEL);
+	if (!(*clk_arr)) {
+		*num_clks = 0;
+		return -ENOMEM;
+	}
+	of_property_for_each_string(dev_node, "clock-names", prop, clk_name) {
+		char gdsc_string[MAX_REG_NAME];
+
+		(*clk_arr)[idx].clk = of_clk_get_by_name(dev_node, clk_name);
+		if (IS_ERR_OR_NULL((*clk_arr)[idx].clk)) {
+			*num_clks = 0;
+			goto exit_of_parse_clk_array;
+		}
+		/* Clocks tagged "no-rate" are enable-only handles. */
+		if (strnstr(clk_name, "no-rate", strlen(clk_name)))
+			(*clk_arr)[idx].enable_only_clk = true;
+
+		scnprintf(gdsc_string, sizeof(gdsc_string), "%s-supply",
+			clk_name);
+
+		/* Remember the GDSC regulator name when one is declared. */
+		if (of_find_property(gdsc_node, gdsc_string, NULL))
+			strlcpy((*clk_arr)[idx].reg_name, clk_name,
+				MAX_REG_NAME);
+		idx++;
+	}
+exit_of_parse_clk_array:
+	return ret;
+}
+
+/*
+ * get_agg_params() - Fill a node's aggregation parameters from DT.
+ * Absent properties get defaults, some of which depend on whether the
+ * node is a fabric device.
+ */
+static void get_agg_params(
+		struct device_node * const dev_node,
+		struct msm_bus_node_info_type *node_info)
+{
+	struct node_agg_params_type *agg = &node_info->agg_params;
+
+	/* Default to an 8-byte-wide bus when no width is given. */
+	if (of_property_read_u32(dev_node, "qcom,buswidth", &agg->buswidth))
+		agg->buswidth = 8;
+
+	/* Aggregate over all QoS ports unless explicitly overridden. */
+	agg->num_aggports = node_info->num_qports;
+	of_property_read_u32(dev_node, "qcom,agg-ports", &agg->num_aggports);
+
+	if (of_property_read_u32(dev_node, "qcom,agg-scheme",
+						&agg->agg_scheme)) {
+		if (node_info->is_fab_dev)
+			agg->agg_scheme = DEFAULT_AGG_SCHEME;
+		else
+			agg->agg_scheme = AGG_SCHEME_NONE;
+	}
+
+	if (of_property_read_u32(dev_node, "qcom,vrail-comp",
+						&agg->vrail_comp)) {
+		if (node_info->is_fab_dev)
+			agg->vrail_comp = DEFAULT_VRAIL_COMP;
+		else
+			agg->vrail_comp = 0;
+	}
+}
+
+/*
+ * read_cell_ids() - Resolve a phandle-list property into "cell-id" values.
+ * @dev: device used for devm allocations
+ * @dev_node: DT node carrying the phandle list
+ * @name: name of the phandle-list property
+ * @data: out: devm-allocated array of cell-ids
+ * @count: out: number of entries in @data
+ *
+ * Returns 0 on success (including when @name is absent), negative errno
+ * on allocation or lookup failure.  Partial devm allocations are left
+ * for devres to reclaim.
+ */
+static int read_cell_ids(struct device *dev, struct device_node *const dev_node,
+		const char *name, u32 **data, u32 *count)
+{
+	u32 size;
+	u32 i;
+	int ret;
+
+	if (!of_get_property(dev_node, name, &size))
+		return 0;
+
+	*count = size / sizeof(u32);
+	/*
+	 * Allocate one u32 per entry.  The element size is sizeof(u32),
+	 * not the total property byte-size (the original passed "size"
+	 * here, over-allocating count * size bytes).
+	 */
+	*data = devm_kcalloc(dev, *count, sizeof(u32), GFP_KERNEL);
+	if (!*data)
+		return -ENOMEM;
+
+	for (i = 0; i < *count; i++) {
+		struct device_node *node;
+
+		node = of_parse_phandle(dev_node, name, i);
+		if (IS_ERR_OR_NULL(node))
+			return -ENOENT;
+
+		ret = of_property_read_u32(node, "cell-id", &(*data)[i]);
+
+		of_node_put(node);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * get_node_info_data() - Parse the common per-node DT properties.
+ *
+ * "cell-id" and "label" are mandatory; everything else is optional.
+ * Returns a devm-allocated node_info, or NULL on any failure.  Partial
+ * devm allocations are intentionally not freed here — devres reclaims
+ * them when the driver detaches.
+ */
+static struct msm_bus_node_info_type *get_node_info_data(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev)
+{
+	struct msm_bus_node_info_type *node_info;
+	int ret;
+	int num_disable_ports;
+	struct device_node *bus_dev;
+
+	node_info = devm_kzalloc(&pdev->dev, sizeof(*node_info),
+			GFP_KERNEL);
+	if (!node_info)
+		return NULL;
+
+	if (of_property_read_u32(dev_node, "cell-id", &node_info->id))
+		return NULL;
+
+	if (of_property_read_string(dev_node, "label", &node_info->name))
+		return NULL;
+
+	node_info->qport = get_arr(pdev, dev_node, "qcom,qport",
+			&node_info->num_qports);
+
+	/* Optional list of QoS ports to leave unprogrammed. */
+	num_disable_ports = of_property_count_elems_of_size(dev_node,
+			 "qcom,disable-ports", sizeof(uint32_t));
+
+	if (num_disable_ports > 0) {
+		node_info->num_disable_ports = num_disable_ports;
+		node_info->disable_ports = devm_kcalloc(&pdev->dev,
+				num_disable_ports, sizeof(uint32_t),
+				GFP_KERNEL);
+		if (!node_info->disable_ports)
+			return NULL;
+		of_property_read_u32_array(dev_node, "qcom,disable-ports",
+				node_info->disable_ports,
+				node_info->num_disable_ports);
+	}
+
+	if (read_cell_ids(&pdev->dev, dev_node, "qcom,connections",
+		&node_info->connections, &node_info->num_connections))
+		return NULL;
+
+	if (read_cell_ids(&pdev->dev, dev_node, "qcom,blacklist",
+		&node_info->bl_cons, &node_info->num_blist))
+		return NULL;
+
+	/* Resolve the parent bus device's cell-id, if one is declared. */
+	bus_dev = of_parse_phandle(dev_node, "qcom,bus-dev", 0);
+	if (!IS_ERR_OR_NULL(bus_dev)) {
+		ret = of_property_read_u32(bus_dev, "cell-id",
+			&node_info->bus_device_id);
+		of_node_put(bus_dev);
+		if (ret) {
+			dev_err(&pdev->dev, "Can't find bus device. Node %d\n",
+					node_info->id);
+			return NULL;
+		}
+	} else
+		dev_dbg(&pdev->dev, "Can't find bdev phandle for %d",
+					node_info->id);
+
+	if (read_cell_ids(&pdev->dev, dev_node, "qcom,bcms",
+		&node_info->bcm_dev_ids, &node_info->num_bcm_devs))
+		return NULL;
+
+	if (read_cell_ids(&pdev->dev, dev_node, "qcom,rscs",
+		&node_info->rsc_dev_ids, &node_info->num_rsc_devs))
+		return NULL;
+
+	node_info->is_bcm_dev = of_property_read_bool(dev_node, "qcom,bcm-dev");
+	node_info->is_rsc_dev = of_property_read_bool(dev_node, "qcom,rsc-dev");
+	node_info->is_fab_dev = of_property_read_bool(dev_node, "qcom,fab-dev");
+	node_info->virt_dev = of_property_read_bool(dev_node, "qcom,virt-dev");
+
+	get_agg_params(dev_node, node_info);
+	get_qos_params(dev_node, pdev, node_info);
+
+	return node_info;
+}
+
+/*
+ * get_bus_node_device_data() - Populate one bus node device from DT.
+ *
+ * Parses the common node info, then the role-specific descriptors
+ * (BCM/RSC/fabric) and, for non-fabric nodes, the QoS BCM vectors,
+ * clocks and regulators.  Returns 0 on success, negative errno on
+ * failure; devm allocations are reclaimed by devres.
+ */
+static int get_bus_node_device_data(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_device_type * const node_device)
+{
+	bool enable_only;
+	bool setrate_only;
+	int num_elems = 0, num_bcms = 0, i = 0, ret = 0, num_regs = 0;
+	uint32_t *vec_arr = NULL;
+	struct qos_bcm_type *qos_bcms = NULL;
+	struct device_node *qos_clk_node = NULL;
+	const char *reg_name;
+	struct property *prop;
+
+	node_device->node_info = get_node_info_data(dev_node, pdev);
+	if (IS_ERR_OR_NULL(node_device->node_info)) {
+		dev_err(&pdev->dev, "Error: Node info missing\n");
+		return -ENODATA;
+	}
+	node_device->ap_owned = of_property_read_bool(dev_node,
+							"qcom,ap-owned");
+
+	if (node_device->node_info->is_bcm_dev) {
+		node_device->bcmdev = get_bcm_device_info(dev_node, pdev);
+		if (!node_device->bcmdev)
+			return -ENODATA;
+	}
+
+	if (node_device->node_info->is_rsc_dev) {
+		node_device->rscdev = get_rsc_device_info(dev_node, pdev);
+		if (!node_device->rscdev)
+			return -ENODATA;
+	}
+
+	if (node_device->node_info->is_fab_dev) {
+		dev_dbg(&pdev->dev, "Dev %d\n", node_device->node_info->id);
+
+		/* Virtual fabrics have no QoS register window to map. */
+		if (!node_device->node_info->virt_dev) {
+			node_device->fabdev =
+				get_fab_device_info(dev_node, pdev);
+			if (!node_device->fabdev)
+				return -ENODATA;
+		}
+	} else {
+		/* qcom,node-qos-bcms is a flat list of <id vec_a vec_b>. */
+		num_elems = of_property_count_elems_of_size(dev_node,
+					"qcom,node-qos-bcms", sizeof(uint32_t));
+
+		if (num_elems > 0) {
+			if (num_elems % 3 != 0) {
+				pr_err("Error: Length-error on getting vectors\n");
+				return -ENODATA;
+			}
+
+			vec_arr = devm_kcalloc(&pdev->dev, num_elems,
+					sizeof(*vec_arr), GFP_KERNEL);
+			if (!vec_arr)
+				return -ENOMEM;
+
+			ret = of_property_read_u32_array(dev_node,
+						"qcom,node-qos-bcms", vec_arr,
+						num_elems);
+			if (ret) {
+				pr_err("Error: problem reading qos-bcm vectors\n");
+				return ret;
+			}
+			num_bcms = num_elems / 3;
+			node_device->num_qos_bcms = num_bcms;
+
+			qos_bcms = devm_kcalloc(&pdev->dev, num_bcms,
+					sizeof(*qos_bcms), GFP_KERNEL);
+			if (!qos_bcms)
+				return -ENOMEM;
+
+			for (i = 0; i < num_bcms; i++) {
+				int index = i * 3;
+
+				qos_bcms[i].qos_bcm_id = vec_arr[index];
+				qos_bcms[i].vec.vec_a =
+					(uint64_t)KBTOB(vec_arr[index + 1]);
+				qos_bcms[i].vec.vec_b =
+					(uint64_t)KBTOB(vec_arr[index + 2]);
+			}
+			node_device->qos_bcms = qos_bcms;
+		}
+
+		enable_only = of_property_read_bool(dev_node,
+						"qcom,enable-only-clk");
+		node_device->clk[DUAL_CTX].enable_only_clk = enable_only;
+
+		/*
+		 * Doesn't make sense to have a clk handle you can't enable or
+		 * set rate on.
+		 */
+		if (!enable_only) {
+			setrate_only = of_property_read_bool(dev_node,
+						"qcom,setrate-only-clk");
+			node_device->clk[DUAL_CTX].setrate_only_clk =
+								setrate_only;
+			node_device->clk[ACTIVE_CTX].setrate_only_clk =
+								setrate_only;
+		}
+
+		qos_clk_node = of_get_child_by_name(dev_node,
+						"qcom,node-qos-clks");
+
+		if (qos_clk_node) {
+			if (msm_bus_of_parse_clk_array(qos_clk_node, dev_node,
+						pdev,
+						&node_device->node_qos_clks,
+						&node_device->num_node_qos_clks,
+						node_device->node_info->id)) {
+				dev_dbg(&pdev->dev, "Bypass QoS programming");
+				/*
+				 * Non-fabric nodes never allocate fabdev, so
+				 * guard the dereference (the unguarded write
+				 * was a NULL-pointer crash waiting to happen).
+				 */
+				if (node_device->fabdev)
+					node_device->fabdev->bypass_qos_prg =
+									true;
+			}
+			of_node_put(qos_clk_node);
+		}
+		node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node,
+							"node_clk");
+
+		if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk))
+			dev_dbg(&pdev->dev,
+				"%s:Failed to get bus clk for bus%d ctx%d",
+				__func__, node_device->node_info->id,
+				DUAL_CTX);
+
+		if (of_find_property(dev_node, "node-gdsc-supply", NULL))
+			strlcpy(node_device->clk[DUAL_CTX].reg_name,
+				"node-gdsc", MAX_REG_NAME);
+
+		num_regs = of_property_count_strings(dev_node,
+							"node-reg-names");
+		if (num_regs > 0) {
+			i = 0;
+			node_device->num_regs = num_regs;
+			node_device->node_regs = devm_kcalloc(&pdev->dev,
+				num_regs, sizeof(*node_device->node_regs),
+				GFP_KERNEL);
+			/* Check the allocation before filling it in. */
+			if (!node_device->node_regs)
+				return -ENOMEM;
+
+			of_property_for_each_string(dev_node, "node-reg-names",
+							prop, reg_name) {
+				strlcpy(node_device->node_regs[i].name,
+					reg_name, MAX_REG_NAME);
+				i++;
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * msm_bus_of_to_pdata() - Build the bus-node registration table from DT.
+ * @pdev: platform device whose of_node has one child per bus node
+ *
+ * Returns a devm-allocated registration table, or NULL on any parse or
+ * allocation failure (devm allocations are reclaimed on driver detach).
+ */
+struct msm_bus_device_node_registration
+	*msm_bus_of_to_pdata(struct platform_device *pdev)
+{
+	struct device_node *of_node, *child_node;
+	struct msm_bus_device_node_registration *pdata;
+	unsigned int i = 0, j;
+	/* Must be signed: child parsing reports negative errno. */
+	int ret;
+
+	of_node = pdev->dev.of_node;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	pdata->num_devices = of_get_child_count(of_node);
+
+	pdata->info = devm_kcalloc(&pdev->dev, pdata->num_devices,
+			sizeof(*pdata->info), GFP_KERNEL);
+	if (!pdata->info)
+		return NULL;
+
+	for_each_child_of_node(of_node, child_node) {
+		ret = get_bus_node_device_data(child_node, pdev,
+				&pdata->info[i]);
+		if (ret) {
+			/* Drop the iterator's reference before bailing out. */
+			of_node_put(child_node);
+			return NULL;
+		}
+		pdata->info[i].of_node = child_node;
+		i++;
+	}
+
+	dev_dbg(&pdev->dev, "bus topology:\n");
+	for (i = 0; i < pdata->num_devices; i++) {
+		dev_dbg(&pdev->dev, "id %d\n", pdata->info[i].node_info->id);
+		dev_dbg(&pdev->dev, "num_qports %d\n",
+			pdata->info[i].node_info->num_qports);
+		dev_dbg(&pdev->dev, "num_connections %d\n",
+			pdata->info[i].node_info->num_connections);
+		dev_dbg(&pdev->dev, "bus_device_id %d\n",
+			pdata->info[i].node_info->bus_device_id);
+		dev_dbg(&pdev->dev, "buswidth %d\n",
+			pdata->info[i].node_info->agg_params.buswidth);
+		for (j = 0; j < pdata->info[i].node_info->num_connections;
+									j++) {
+			dev_dbg(&pdev->dev, "connection[%d]: %d\n", j,
+				pdata->info[i].node_info->connections[j]);
+		}
+		for (j = 0; j < pdata->info[i].node_info->num_blist;
+									 j++) {
+			dev_dbg(&pdev->dev, "black_listed_node[%d]: %d\n", j,
+				pdata->info[i].node_info->bl_cons[j]);
+		}
+		if (pdata->info[i].fabdev) {
+			dev_dbg(&pdev->dev, "base_addr %zu\n",
+				(size_t)pdata->info[i].fabdev->pqos_base);
+			dev_dbg(&pdev->dev, "bus_type %d\n",
+					pdata->info[i].fabdev->bus_type);
+		}
+	}
+	return pdata;
+}
+
+/*
+ * msm_bus_of_get_ids() - Resolve a phandle-list property into cell-ids.
+ * @dev_ids: out: devm-allocated array of ids (NULL on failure)
+ * @num_ids: out: number of entries
+ *
+ * Returns 0 on success, negative errno otherwise.  The original passed
+ * an uninitialized "size" both to of_property_count_elems_of_size() and
+ * devm_kzalloc(), kept going after a count failure, and returned 0 from
+ * its error path — all fixed here.
+ */
+static int msm_bus_of_get_ids(struct platform_device *pdev,
+			struct device_node *dev_node, int **dev_ids,
+			int *num_ids, char *prop_name)
+{
+	int ret = 0;
+	int i;
+	struct device_node *rule_node;
+	int *ids = NULL;
+
+	/* Count u32-sized elements; also covers the "property absent" case. */
+	*num_ids = of_property_count_elems_of_size(dev_node, prop_name,
+							sizeof(u32));
+	if (*num_ids <= 0) {
+		dev_err(&pdev->dev, "No rule nodes, skipping node\n");
+		*num_ids = 0;
+		return -ENXIO;
+	}
+
+	ids = devm_kcalloc(&pdev->dev, *num_ids, sizeof(*ids), GFP_KERNEL);
+	if (!ids)
+		return -ENOMEM;
+
+	*dev_ids = ids;
+	for (i = 0; i < *num_ids; i++) {
+		rule_node = of_parse_phandle(dev_node, prop_name, i);
+		if (IS_ERR_OR_NULL(rule_node)) {
+			/* Nothing to put: the lookup itself failed. */
+			ret = -ENXIO;
+			goto err_get_ids;
+		}
+
+		if (of_property_read_u32(rule_node, "cell-id", &ids[i])) {
+			of_node_put(rule_node);
+			ret = -ENXIO;
+			goto err_get_ids;
+		}
+
+		of_node_put(rule_node);
+	}
+
+	return 0;
+err_get_ids:
+	dev_err(&pdev->dev, "Can't get rule node id\n");
+	devm_kfree(&pdev->dev, ids);
+	*dev_ids = NULL;
+	return ret;
+}
+
+/*
+ * msm_bus_of_get_static_rules() - Parse static bus rules from DT.
+ * @pdev: platform device whose of_node children each describe one rule
+ * @static_rules: out: devm-allocated rule array (NULL on failure)
+ *
+ * Returns the number of parsed rules on success, negative errno on
+ * failure.  The original success path fell straight through into the
+ * error cleanup (freeing everything and NULLing *static_rules even when
+ * parsing succeeded) and freed local_rule once per loop iteration; both
+ * are fixed here.
+ */
+int msm_bus_of_get_static_rules(struct platform_device *pdev,
+				struct bus_rule_type **static_rules)
+{
+	int ret = 0;
+	struct device_node *of_node, *child_node;
+	int num_rules = 0;
+	int rule_idx = 0;
+	int bw_fld = 0;
+	int i;
+	struct bus_rule_type *local_rule = NULL;
+
+	of_node = pdev->dev.of_node;
+	num_rules = of_get_child_count(of_node);
+	local_rule = devm_kcalloc(&pdev->dev, num_rules,
+			sizeof(*local_rule), GFP_KERNEL);
+
+	if (!local_rule)
+		return -ENOMEM;
+
+	*static_rules = local_rule;
+	for_each_child_of_node(of_node, child_node) {
+		ret = msm_bus_of_get_ids(pdev, child_node,
+			&local_rule[rule_idx].src_id,
+			&local_rule[rule_idx].num_src,
+			"qcom,src-nodes");
+
+		ret = msm_bus_of_get_ids(pdev, child_node,
+			&local_rule[rule_idx].dst_node,
+			&local_rule[rule_idx].num_dst,
+			"qcom,dest-node");
+
+		ret = of_property_read_u32(child_node, "qcom,src-field",
+				&local_rule[rule_idx].src_field);
+		if (ret) {
+			dev_err(&pdev->dev, "src-field missing\n");
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,src-op",
+				&local_rule[rule_idx].op);
+		if (ret) {
+			dev_err(&pdev->dev, "src-op missing\n");
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,mode",
+				&local_rule[rule_idx].mode);
+		if (ret) {
+			dev_err(&pdev->dev, "mode missing\n");
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,thresh", &bw_fld);
+		if (ret) {
+			dev_err(&pdev->dev, "thresh missing\n");
+			goto err_static_rules;
+		}
+		local_rule[rule_idx].thresh = KBTOB(bw_fld);
+
+		/* dest-bw is optional and defaults to zero. */
+		ret = of_property_read_u32(child_node, "qcom,dest-bw",
+				&bw_fld);
+		if (ret)
+			local_rule[rule_idx].dst_bw = 0;
+		else
+			local_rule[rule_idx].dst_bw = KBTOB(bw_fld);
+
+		rule_idx++;
+	}
+	/* Success: do NOT fall into the cleanup below. */
+	return rule_idx;
+err_static_rules:
+	/* Drop the reference held by the interrupted iterator. */
+	of_node_put(child_node);
+	for (i = 0; i < num_rules; i++) {
+		if (!IS_ERR_OR_NULL(local_rule[i].src_id))
+			devm_kfree(&pdev->dev, local_rule[i].src_id);
+		if (!IS_ERR_OR_NULL(local_rule[i].dst_node))
+			devm_kfree(&pdev->dev, local_rule[i].dst_node);
+	}
+	devm_kfree(&pdev->dev, local_rule);
+	*static_rules = NULL;
+	return ret;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c b/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c
new file mode 100644
index 0000000..d78fd8b5f
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2016, 2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include "msm_bus_core.h"
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <soc/qcom/rpm-smd.h>
+
+/* Stubs for backward compatibility */
+/* Intentionally a no-op on RPM-SMD targets. */
+void msm_bus_rpm_set_mt_mask(void)
+{
+}
+
+/* Memory is always reported as interleaved here. */
+bool msm_bus_rpm_is_mem_interleaved(void)
+{
+	return true;
+}
+
+/* Per-fabric commit snapshot: master/slave arbitration tables sent to RPM. */
+struct commit_data {
+	struct msm_bus_node_hw_info *mas_arb;
+	struct msm_bus_node_hw_info *slv_arb;
+};
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Append the committed master/slave bandwidth tables to a debugfs buffer.
+ * @curr tracks the write offset into @buf and is advanced in place.
+ */
+void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size,
+	void *cdata, int nmasters, int nslaves, int ntslaves)
+{
+	int i;
+	struct commit_data *cd = (struct commit_data *)cdata;
+
+	*curr += scnprintf(buf + *curr, max_size - *curr, "\nMas BW:\n");
+	for (i = 0; i < nmasters; i++) {
+		*curr += scnprintf(buf + *curr, max_size - *curr,
+			"%d: %llu\t", cd->mas_arb[i].hw_id,
+			cd->mas_arb[i].bw);
+	}
+	*curr += scnprintf(buf + *curr, max_size - *curr, "\nSlave BW:\n");
+	for (i = 0; i < nslaves; i++) {
+		*curr += scnprintf(buf + *curr, max_size - *curr,
+			"%d: %llu\t", cd->slv_arb[i].hw_id,
+			cd->slv_arb[i].bw);
+	}
+}
+#endif
+
+/*
+ * msm_bus_rpm_compare_cdata() - Compare two commit snapshots.
+ * Returns 0 when both master and slave arbitration tables match,
+ * non-zero (memcmp result) otherwise.
+ *
+ * NOTE(review): the "* 2" multiplier presumably covers two stored sets
+ * per node — confirm against the allocator of mas_arb/slv_arb.
+ */
+static int msm_bus_rpm_compare_cdata(
+	struct msm_bus_fabric_registration *fab_pdata,
+	struct commit_data *cd1, struct commit_data *cd2)
+{
+	size_t n;
+	int ret;
+
+	n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nmasters * 2;
+	ret = memcmp(cd1->mas_arb, cd2->mas_arb, n);
+	if (ret) {
+		MSM_BUS_DBG("Master Arb Data not equal\n");
+		return ret;
+	}
+
+	n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nslaves * 2;
+	ret = memcmp(cd1->slv_arb, cd2->slv_arb, n);
+	if (ret) {
+		/* Fixed copy/paste: this path compares the slave table. */
+		MSM_BUS_DBG("Slave Arb Data not equal\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * msm_bus_rpm_req() - Send one node's bandwidth vote to the RPM.
+ * @ctx: framework context (ACTIVE_CTX/DUAL_CTX), translated to an RPM set
+ * @rsc_type: RPM resource class (master or slave request)
+ * @key: KVP key for the bandwidth field
+ * @hw_info: node whose hw_id/bw are sent
+ * @valid: true to send the bandwidth value, false to invalidate the entry
+ *
+ * Blocks until the RPM acks the message.  Returns 0 on success,
+ * negative errno on failure; the RPM request is freed on all paths.
+ */
+static int msm_bus_rpm_req(int ctx, uint32_t rsc_type, uint32_t key,
+	struct msm_bus_node_hw_info *hw_info, bool valid)
+{
+	struct msm_rpm_request *rpm_req;
+	int ret = 0, msg_id;
+
+	/* Map framework contexts onto the RPM's active/sleep sets. */
+	if (ctx == ACTIVE_CTX)
+		ctx = MSM_RPM_CTX_ACTIVE_SET;
+	else if (ctx == DUAL_CTX)
+		ctx = MSM_RPM_CTX_SLEEP_SET;
+
+	rpm_req = msm_rpm_create_request(ctx, rsc_type, hw_info->hw_id, 1);
+	if (rpm_req == NULL) {
+		MSM_BUS_WARN("RPM: Couldn't create RPM Request\n");
+		return -ENXIO;
+	}
+
+	if (valid) {
+		ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)
+			&hw_info->bw, (int)(sizeof(uint64_t)));
+		if (ret) {
+			MSM_BUS_WARN("RPM: Add KVP failed for RPM Req:%u\n",
+				rsc_type);
+			goto free_rpm_request;
+		}
+
+		MSM_BUS_DBG("Added Key: %d, Val: %llu, size: %zu\n", key,
+			hw_info->bw, sizeof(uint64_t));
+	} else {
+		/* Invalidate RPM requests */
+		ret = msm_rpm_add_kvp_data(rpm_req, 0, NULL, 0);
+		if (ret) {
+			MSM_BUS_WARN("RPM: Add KVP failed for RPM Req:%u\n",
+				rsc_type);
+			goto free_rpm_request;
+		}
+	}
+
+	msg_id = msm_rpm_send_request(rpm_req);
+	if (!msg_id) {
+		MSM_BUS_WARN("RPM: No message ID for req\n");
+		ret = -ENXIO;
+		goto free_rpm_request;
+	}
+
+	ret = msm_rpm_wait_for_ack(msg_id);
+	if (ret) {
+		MSM_BUS_WARN("RPM: Ack failed\n");
+		goto free_rpm_request;
+	}
+
+free_rpm_request:
+	msm_rpm_free_request(rpm_req);
+
+	return ret;
+}
+
+/*
+ * msm_bus_rpm_commit_arb() - Push all dirty master/slave votes to the RPM.
+ *
+ * Walks both arbitration tables, sends only entries flagged dirty, and
+ * clears the dirty flag on success.  Stops the affected table's walk at
+ * the first failure and returns that status (the slave walk still runs
+ * even if a master request failed).
+ */
+static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration
+	*fab_pdata, int ctx, void *rpm_data,
+	struct commit_data *cd, bool valid)
+{
+	int i, status = 0, rsc_type, key;
+
+	MSM_BUS_DBG("Context: %d\n", ctx);
+	rsc_type = RPM_BUS_MASTER_REQ;
+	key = RPM_MASTER_FIELD_BW;
+	for (i = 0; i < fab_pdata->nmasters; i++) {
+		if (!cd->mas_arb[i].dirty)
+			continue;
+
+		MSM_BUS_DBG("MAS HWID: %d, BW: %llu DIRTY: %d\n",
+			cd->mas_arb[i].hw_id,
+			cd->mas_arb[i].bw,
+			cd->mas_arb[i].dirty);
+		status = msm_bus_rpm_req(ctx, rsc_type, key,
+			&cd->mas_arb[i], valid);
+		if (status) {
+			MSM_BUS_ERR("RPM: Req fail: mas:%d, bw:%llu\n",
+				cd->mas_arb[i].hw_id,
+				cd->mas_arb[i].bw);
+			break;
+		}
+		cd->mas_arb[i].dirty = false;
+	}
+
+	rsc_type = RPM_BUS_SLAVE_REQ;
+	key = RPM_SLAVE_FIELD_BW;
+	for (i = 0; i < fab_pdata->nslaves; i++) {
+		if (!cd->slv_arb[i].dirty)
+			continue;
+
+		MSM_BUS_DBG("SLV HWID: %d, BW: %llu DIRTY: %d\n",
+			cd->slv_arb[i].hw_id,
+			cd->slv_arb[i].bw,
+			cd->slv_arb[i].dirty);
+		status = msm_bus_rpm_req(ctx, rsc_type, key,
+			&cd->slv_arb[i], valid);
+		if (status) {
+			MSM_BUS_ERR("RPM: Req fail: slv:%d, bw:%llu\n",
+				cd->slv_arb[i].hw_id,
+				cd->slv_arb[i].bw);
+			break;
+		}
+		cd->slv_arb[i].dirty = false;
+	}
+
+	return status;
+}
+
+/*
+ * msm_bus_remote_hw_commit() - Commit the arbitration data to RPM
+ * @fab_pdata: Fabric for which the data should be committed
+ * @hw_data: RPM handle/data passed through to the commit helpers
+ * @cdata: per-context commit data (indexed by DUAL_CTX / ACTIVE_CTX)
+ *
+ * Returns the status of the last (active-set) commit; a sleep-set
+ * failure is logged but does not short-circuit the active-set commit.
+ */
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+	*fab_pdata, void *hw_data, void **cdata)
+{
+
+	int ret;
+	bool valid;
+	struct commit_data *dual_cd, *act_cd;
+	void *rpm_data = hw_data;
+
+	MSM_BUS_DBG("\nReached RPM Commit\n");
+	dual_cd = (struct commit_data *)cdata[DUAL_CTX];
+	act_cd = (struct commit_data *)cdata[ACTIVE_CTX];
+
+	/*
+	 * If the arb data for active set and sleep set is
+	 * different, commit both sets.
+	 * If the arb data for active set and sleep set is
+	 * the same, invalidate the sleep set.
+	 */
+	ret = msm_bus_rpm_compare_cdata(fab_pdata, act_cd, dual_cd);
+	if (!ret)
+		/* Invalidate sleep set.*/
+		valid = false;
+	else
+		valid = true;
+
+	ret = msm_bus_rpm_commit_arb(fab_pdata, DUAL_CTX, rpm_data,
+		dual_cd, valid);
+	if (ret)
+		MSM_BUS_ERR("Error committing fabric:%d in %d ctx\n",
+			fab_pdata->id, DUAL_CTX);
+
+	/* The active set is always committed with valid data. */
+	valid = true;
+	ret = msm_bus_rpm_commit_arb(fab_pdata, ACTIVE_CTX, rpm_data, act_cd,
+		valid);
+	if (ret)
+		MSM_BUS_ERR("Error committing fabric:%d in %d ctx\n",
+			fab_pdata->id, ACTIVE_CTX);
+
+	return ret;
+}
+
+/*
+ * msm_bus_rpm_hw_init() - Per-fabric RPM init hook.
+ * Marks non-AHB fabrics as RPM-managed; nothing else to set up here.
+ * Always returns 0.
+ */
+int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo)
+{
+	if (!pdata->ahb)
+		pdata->rpm_enabled = 1;
+	return 0;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
new file mode 100644
index 0000000..232ffd9
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
+#define _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+
+/* Maximum number of virtual clock domains tracked per RSC. */
+#define VCD_MAX_CNT 16
+
+struct msm_bus_node_device_type;
+
+/* Per-client vote bookkeeping for one hop of a client's path. */
+struct link_node {
+	uint64_t lnode_ib[NUM_CTX];
+	uint64_t lnode_ab[NUM_CTX];
+	uint64_t query_ib[NUM_CTX];
+	uint64_t query_ab[NUM_CTX];
+	uint64_t alc_idx[NUM_CTX];
+	int next;
+	struct device *next_dev;
+	struct list_head link;
+	uint32_t in_use;
+	const char *cl_name;
+	unsigned int bus_dev_id;
+};
+
+/* New types introduced for adhoc topology */
+/* Hardware-specific QoS programming hooks, implemented per NOC flavor. */
+struct msm_bus_noc_ops {
+	int (*qos_init)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq);
+	int (*set_bw)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq);
+	int (*limit_mport)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq, int enable_lim,
+			uint64_t lim_bw);
+	bool (*update_bw_reg)(int mode);
+	int (*sbm_config)(struct msm_bus_node_device_type *node_dev,
+			void __iomem *noc_base, uint32_t sbm_offset,
+			bool enable);
+};
+
+/* Aggregated bandwidth state for a node (current and last-committed). */
+struct nodebw {
+	uint64_t sum_ab;
+	uint64_t last_sum_ab;
+	uint64_t last_max_ib;
+	uint64_t max_ib;
+	uint64_t max_ab;
+	uint64_t sum_query_ab;
+	uint64_t max_query_ib;
+	uint64_t max_query_ab;
+	uint64_t max_alc;
+	uint64_t cur_clk_hz;
+	uint32_t util_used;
+	uint32_t vrail_used;
+};
+
+/* BCM vote pair (vec_a/vec_b), plus the query-only shadow copies. */
+struct nodevector {
+	uint64_t vec_a;
+	uint64_t vec_b;
+	uint64_t query_vec_a;
+	uint64_t query_vec_b;
+};
+
+/* Named regulator handle attached to a node. */
+struct node_regulator {
+	char name[MAX_REG_NAME];
+	struct regulator *reg;
+};
+
+/* A QoS-only BCM vote keyed by BCM id. */
+struct qos_bcm_type {
+	int qos_bcm_id;
+	struct nodevector vec;
+};
+
+/* RSC device: TCS command lists and per-VCD commit lists. */
+struct msm_bus_rsc_device_type {
+	struct device *mbox;
+	struct list_head bcm_clist[VCD_MAX_CNT];
+	int req_state;
+	uint32_t acv[NUM_CTX];
+	uint32_t query_acv[NUM_CTX];
+	struct tcs_cmd *cmdlist_active;
+	struct tcs_cmd *cmdlist_wake;
+	struct tcs_cmd *cmdlist_sleep;
+	int num_bcm_devs;
+};
+
+/* BCM device descriptor as parsed from the command DB / DT. */
+struct msm_bus_bcm_device_type {
+	const char *name;
+	uint32_t width;
+	uint32_t clk_domain;
+	uint32_t type;
+	uint32_t unit_size;
+	uint32_t addr;
+	uint32_t drv_id;
+	int num_bus_devs;
+};
+
+/* Fabric device: QoS register window and per-fabric programming hooks. */
+struct msm_bus_fab_device_type {
+	void __iomem *qos_base;
+	phys_addr_t pqos_base;
+	size_t qos_range;
+	uint32_t base_offset;
+	uint32_t qos_freq;
+	uint32_t qos_off;
+	uint32_t sbm_offset;
+	struct msm_bus_noc_ops noc_ops;
+	enum msm_bus_hw_sel bus_type;
+	bool bypass_qos_prg;
+};
+
+/* NOC limiter settings: bandwidth and saturation. */
+struct msm_bus_noc_limiter {
+	uint32_t bw;
+	uint32_t sat;
+};
+
+/* NOC regulator (QoS) window settings. */
+struct msm_bus_noc_regulator {
+	uint32_t low_prio;
+	uint32_t hi_prio;
+	uint32_t bw;
+	uint32_t sat;
+};
+
+/* Read/write enables for the NOC regulator mode. */
+struct msm_bus_noc_regulator_mode {
+	uint32_t read;
+	uint32_t write;
+};
+
+/* Aggregate of all per-node QoS knobs parsed from DT. */
+struct msm_bus_noc_qos_params {
+	uint32_t prio_dflt;
+	struct msm_bus_noc_limiter limiter;
+	bool limiter_en;
+	struct msm_bus_noc_regulator reg;
+	struct msm_bus_noc_regulator_mode reg_mode;
+	bool urg_fwd_en;
+	bool defer_init_qos;
+};
+
+/* Utilization factor applied above a given bandwidth threshold. */
+struct node_util_levels_type {
+	uint64_t threshold;
+	uint32_t util_fact;
+};
+
+/* Bandwidth-aggregation configuration for a node. */
+struct node_agg_params_type {
+	uint32_t agg_scheme;
+	uint32_t num_aggports;
+	unsigned int buswidth;
+	uint32_t vrail_comp;
+	uint32_t num_util_levels;
+	struct node_util_levels_type *util_levels;
+};
+
+/* Static, DT-derived description of a single bus node. */
+struct msm_bus_node_info_type {
+	const char *name;
+	unsigned int id;
+	int mas_rpm_id;
+	int slv_rpm_id;
+	int num_ports;
+	int num_qports;
+	int *qport;
+	struct msm_bus_noc_qos_params qos_params;
+	unsigned int num_connections;
+	unsigned int num_blist;
+	unsigned int num_bcm_devs;
+	unsigned int num_rsc_devs;
+	bool is_fab_dev;
+	bool virt_dev;
+	bool is_bcm_dev;
+	bool is_rsc_dev;
+	bool is_traversed;
+	unsigned int *connections;
+	unsigned int *bl_cons;
+	unsigned int *bcm_dev_ids;
+	unsigned int *rsc_dev_ids;
+	struct device **dev_connections;
+	struct device **black_connections;
+	struct device **bcm_devs;
+	struct device **rsc_devs;
+	int *bcm_req_idx;
+	unsigned int bus_device_id;
+	struct device *bus_device;
+	struct rule_update_path_info rule;
+	uint64_t lim_bw;
+	bool defer_qos;
+	uint32_t *disable_ports;
+	int num_disable_ports;
+	struct node_agg_params_type agg_params;
+};
+
+/* Runtime state of a bus node: static info plus votes, clocks and links. */
+struct msm_bus_node_device_type {
+	struct msm_bus_node_info_type *node_info;
+	struct msm_bus_fab_device_type *fabdev;
+	struct msm_bus_bcm_device_type *bcmdev;
+	struct msm_bus_rsc_device_type *rscdev;
+	int num_lnodes;
+	struct link_node *lnode_list;
+	struct nodebw node_bw[NUM_CTX];
+	struct nodevector node_vec[NUM_CTX];
+	struct list_head link;
+	struct list_head query_link;
+	struct nodeclk clk[NUM_CTX];
+	struct nodeclk bus_qos_clk;
+	uint32_t num_node_qos_clks;
+	struct nodeclk *node_qos_clks;
+	uint32_t num_qos_bcms;
+	struct qos_bcm_type *qos_bcms;
+	uint32_t num_regs;
+	struct node_regulator *node_regs;
+	unsigned int ap_owned;
+	struct device_node *of_node;
+	struct device dev;
+	bool dirty;
+	bool updated;
+	bool query_dirty;
+	struct list_head dev_link;
+	struct list_head devlist;
+	bool is_connected;
+};
+
+/* Recover the bus node wrapper from its embedded struct device. */
+static inline struct msm_bus_node_device_type *to_msm_bus_node(struct device *d)
+{
+	return container_of(d, struct msm_bus_node_device_type, dev);
+}
+
+
+/* Core commit/query entry points implemented by the rpmh backend. */
+int msm_bus_enable_limiter(struct msm_bus_node_device_type *nodedev,
+				int throttle_en, uint64_t lim_bw);
+int msm_bus_commit_data(struct list_head *clist);
+int bcm_remove_handoff_req(struct device *dev, void *data);
+int commit_late_init_data(bool lock);
+int msm_bus_query_gen(struct list_head *qlist,
+				struct msm_bus_tcs_usecase *tcs_usecase);
+void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
+					size_t new_size, gfp_t flags);
+
+/* DT parsing and per-topology ops setup (see msm_bus_of_rpmh.c). */
+extern struct msm_bus_device_node_registration
+	*msm_bus_of_to_pdata(struct platform_device *pdev);
+extern void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops);
+extern int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_of_get_static_rules(struct platform_device *pdev,
+					struct bus_rule_type **static_rule);
+extern int msm_rules_update_path(struct list_head *input_list,
+				struct list_head *output_list);
+extern void print_all_rules(void);
+#ifdef CONFIG_DEBUG_BUS_VOTER
+int msm_bus_floor_init(struct device *dev);
+#else
+static inline int msm_bus_floor_init(struct device *dev)
+{
+	return 0;
+}
+#endif /* CONFIG_DEBUG_BUS_VOTER */
+#endif /* _ARCH_ARM_MACH_MSM_BUS_ADHOC_H */
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rules.c b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
new file mode 100644
index 0000000..ce5c9cd
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
@@ -0,0 +1,719 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/list_sort.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_bus_rules.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/msm-bus.h>
+#include <trace/events/trace_msm_bus.h>
+
+/* Cached bandwidth/clock vote of one source node, refreshed on each update. */
+struct node_vote_info {
+	int id;
+	u64 ib;
+	u64 ab;
+	u64 clk;
+};
+
+/* One registered rule instance attached to a destination/notifier node. */
+struct rules_def {
+	int rule_id;
+	int num_src;
+	int state;
+	struct node_vote_info *src_info;
+	struct bus_rule_type rule_ops;
+	bool state_change;
+	struct list_head link;
+};
+
+/* Per-destination (or per-notifier, id == NB_ID) container of rules. */
+struct rule_node_info {
+	int id;
+	void *data;
+	struct raw_notifier_head rule_notify_list;
+	struct rules_def *cur_rule;
+	int num_rules;
+	struct list_head node_rules;
+	struct list_head link;
+	struct rule_apply_rcm_info apply;
+};
+
+/* Serializes all access to node_list and the per-node rule lists. */
+DEFINE_MUTEX(msm_bus_rules_lock);
+static LIST_HEAD(node_list);
+static struct rule_node_info *get_node(u32 id, void *data);
+static int node_rules_compare(void *priv, struct list_head *a,
+	struct list_head *b);
+
+#define LE(op1, op2) (op1 <= op2)
+#define LT(op1, op2) (op1 < op2)
+#define GE(op1, op2) (op1 >= op2)
+#define GT(op1, op2) (op1 > op2)
+/* Pseudo node id shared by all notifier-based clients. */
+#define NB_ID (0x201)
+
+/*
+ * Look up an existing rule node by id. Notifier nodes all share NB_ID,
+ * so they are disambiguated by their private @data pointer.
+ * Returns NULL when no matching node is registered.
+ */
+static struct rule_node_info *get_node(u32 id, void *data)
+{
+	struct rule_node_info *node;
+
+	list_for_each_entry(node, &node_list, link) {
+		if (node->id != id)
+			continue;
+		if (id != NB_ID || node->data == data)
+			return node;
+	}
+	return NULL;
+}
+
+/*
+ * Find the rule node for @id, creating and inserting it into node_list if
+ * it does not exist yet. Returns NULL only on allocation failure.
+ * Caller must hold msm_bus_rules_lock.
+ */
+static struct rule_node_info *gen_node(u32 id, void *data)
+{
+	struct rule_node_info *node_it = NULL;
+	struct rule_node_info *node_match = NULL;
+
+	list_for_each_entry(node_it, &node_list, link) {
+		if (node_it->id == id) {
+			node_match = node_it;
+			break;
+		}
+	}
+
+	if (!node_match) {
+		node_match = kzalloc(sizeof(struct rule_node_info), GFP_KERNEL);
+		if (!node_match)
+			goto exit_node_match;
+
+		node_match->id = id;
+		node_match->cur_rule = NULL;
+		node_match->num_rules = 0;
+		node_match->data = data;
+		list_add_tail(&node_match->link, &node_list);
+		INIT_LIST_HEAD(&node_match->node_rules);
+		RAW_INIT_NOTIFIER_HEAD(&node_match->rule_notify_list);
+		pr_debug("Added new node %d to list\n", id);
+	}
+exit_node_match:
+	return node_match;
+}
+
+/* Evaluate "op1 <op> op2" for the supported threshold operators. */
+static bool do_compare_op(u64 op1, u64 op2, int op)
+{
+	switch (op) {
+	case OP_LE:
+		return LE(op1, op2);
+	case OP_LT:
+		return LT(op1, op2);
+	case OP_GT:
+		return GT(op1, op2);
+	case OP_GE:
+		return GE(op1, op2);
+	case OP_NOOP:
+		/* Unconditional rules always match. */
+		return true;
+	default:
+		pr_info("Invalid OP %d\n", op);
+		return false;
+	}
+}
+
+/*
+ * Refresh the cached ib/ab/clk votes of every rule source that matches the
+ * node reported in @inp_node.
+ */
+static void update_src_id_vote(struct rule_update_path_info *inp_node,
+				struct rule_node_info *rule_node)
+{
+	struct rules_def *rule;
+	int i;
+
+	list_for_each_entry(rule, &rule_node->node_rules, link) {
+		for (i = 0; i < rule->num_src; i++) {
+			if (rule->src_info[i].id == inp_node->id) {
+				rule->src_info[i].ib = inp_node->ib;
+				rule->src_info[i].ab = inp_node->ab;
+				rule->src_info[i].clk = inp_node->clk;
+			}
+		}
+	}
+}
+
+/*
+ * Sum the selected vote field (ib, ab or clk, per rule_ops.src_field)
+ * across all sources of @rule.
+ *
+ * NOTE(review): the @src_id parameter is never used — the sum always spans
+ * every source; confirm whether per-source filtering was intended.
+ */
+static u64 get_field(struct rules_def *rule, int src_id)
+{
+	u64 field = 0;
+	int i;
+
+	for (i = 0; i < rule->num_src; i++) {
+		switch (rule->rule_ops.src_field) {
+		case FLD_IB:
+			field += rule->src_info[i].ib;
+			break;
+		case FLD_AB:
+			field += rule->src_info[i].ab;
+			break;
+		case FLD_CLK:
+			field += rule->src_info[i].clk;
+			break;
+		}
+	}
+
+	return field;
+}
+
+/*
+ * Return true when @rule's aggregate source field crosses its threshold
+ * under the rule's comparison operator. NOOP rules are rejected here
+ * (unlike in do_compare_op) since they fall into the default branch.
+ */
+static bool check_rule(struct rules_def *rule,
+			struct rule_update_path_info *inp)
+{
+	bool ret = false;
+
+	if (!rule)
+		return ret;
+
+	switch (rule->rule_ops.op) {
+	case OP_LE:
+	case OP_LT:
+	case OP_GT:
+	case OP_GE:
+	{
+		u64 src_field = get_field(rule, inp->id);
+
+		ret = do_compare_op(src_field, rule->rule_ops.thresh,
+						rule->rule_ops.op);
+		break;
+	}
+	default:
+		pr_err("Unsupported op %d\n", rule->rule_ops.op);
+		break;
+	}
+	return ret;
+}
+
+/*
+ * Re-evaluate every rule on @node that references the updated source in
+ * @inp_node, moving each between APPLIED and NOT_APPLIED and recording a
+ * state_change so apply_rule() can act on transitions only.
+ */
+static void match_rule(struct rule_update_path_info *inp_node,
+			struct rule_node_info *node)
+{
+	struct rules_def *rule;
+	int i;
+
+	list_for_each_entry(rule, &node->node_rules, link) {
+		for (i = 0; i < rule->num_src; i++) {
+			if (rule->src_info[i].id != inp_node->id)
+				continue;
+
+			if (check_rule(rule, inp_node)) {
+				trace_bus_rules_matches(
+					(node->cur_rule ?
+						node->cur_rule->rule_id : -1),
+					inp_node->id, inp_node->ab,
+					inp_node->ib, inp_node->clk);
+				if (rule->state ==
+					RULE_STATE_NOT_APPLIED)
+					rule->state_change = true;
+				rule->state = RULE_STATE_APPLIED;
+			} else {
+				if (rule->state ==
+					RULE_STATE_APPLIED)
+					rule->state_change = true;
+				rule->state = RULE_STATE_NOT_APPLIED;
+			}
+		}
+	}
+}
+
+/*
+ * Pick the highest-priority applied rule on @node (the rule list is kept
+ * sorted by node_rules_compare) and queue its throttle action on
+ * @output_list when the winning rule changed. Notifier nodes (NB_ID)
+ * instead fire their notifier chain on every state transition.
+ */
+static void apply_rule(struct rule_node_info *node,
+			struct list_head *output_list)
+{
+	struct rules_def *rule;
+	struct rules_def *last_rule;
+
+	last_rule = node->cur_rule;
+	node->cur_rule = NULL;
+	list_for_each_entry(rule, &node->node_rules, link) {
+		/* First applied rule in sorted order wins. */
+		if ((rule->state == RULE_STATE_APPLIED) &&
+						!node->cur_rule)
+			node->cur_rule = rule;
+
+		if (node->id == NB_ID) {
+			if (rule->state_change) {
+				rule->state_change = false;
+				raw_notifier_call_chain(&node->rule_notify_list,
+					rule->state, (void *)&rule->rule_ops);
+			}
+		} else {
+			if ((rule->state == RULE_STATE_APPLIED) &&
+				(node->cur_rule &&
+				(node->cur_rule->rule_id == rule->rule_id))) {
+				node->apply.id = rule->rule_ops.dst_node[0];
+				node->apply.throttle = rule->rule_ops.mode;
+				node->apply.lim_bw = rule->rule_ops.dst_bw;
+				node->apply.after_clk_commit = false;
+				if (last_rule != node->cur_rule)
+					list_add_tail(&node->apply.link,
+							output_list);
+				/*
+				 * Loosening the limit (new rule sorts after
+				 * the old one) is deferred until after the
+				 * clock commit to avoid transient overshoot.
+				 */
+				if (last_rule) {
+					if (node_rules_compare(NULL,
+						&last_rule->link,
+						&node->cur_rule->link) == -1)
+						node->apply.after_clk_commit =
+									true;
+				}
+			}
+			rule->state_change = false;
+		}
+	}
+
+}
+
+/*
+ * Entry point from the bus arbiter: fold the fresh votes on @input_list
+ * into every registered rule node, re-match the rules, and collect the
+ * resulting throttle actions on @output_list. Always returns 0.
+ */
+int msm_rules_update_path(struct list_head *input_list,
+			struct list_head *output_list)
+{
+	int ret = 0;
+	struct rule_update_path_info *inp_node;
+	struct rule_node_info *node_it = NULL;
+
+	mutex_lock(&msm_bus_rules_lock);
+	list_for_each_entry(inp_node, input_list, link) {
+		list_for_each_entry(node_it, &node_list, link) {
+			update_src_id_vote(inp_node, node_it);
+			match_rule(inp_node, node_it);
+		}
+	}
+
+	list_for_each_entry(node_it, &node_list, link)
+		apply_rule(node_it, output_list);
+	mutex_unlock(&msm_bus_rules_lock);
+	return ret;
+}
+
+/*
+ * Treat LT/LE (and GT/GE) as equivalent operators for sorting purposes.
+ * NOTE(review): relies on OP_LT/OP_LE and OP_GT/OP_GE being adjacent enum
+ * values so that abs(op1 - op2) <= 1 — confirm against msm_bus_rules.h.
+ */
+static bool ops_equal(int op1, int op2)
+{
+	bool ret = false;
+
+	switch (op1) {
+	case OP_GT:
+	case OP_GE:
+	case OP_LT:
+	case OP_LE:
+		if (abs(op1 - op2) <= 1)
+			ret = true;
+		break;
+	default:
+		ret = (op1 == op2);
+	}
+
+	return ret;
+}
+
+/* A rule throttles unless its mode is explicitly THROTTLE_OFF. */
+static bool is_throttle_rule(int mode)
+{
+	return mode != THROTTLE_OFF;
+}
+
+/*
+ * list_sort() comparator ordering a node's rules by priority:
+ * throttling rules sort before THROTTLE_OFF rules, THROTTLE_ON before
+ * other throttle modes, and within an operator family the tighter
+ * threshold wins (lower for LT/LE, higher for GT/GE).
+ * Returns <0 when @a should precede @b, >0 otherwise.
+ */
+static int node_rules_compare(void *priv, struct list_head *a,
+				struct list_head *b)
+{
+	struct rules_def *ra = container_of(a, struct rules_def, link);
+	struct rules_def *rb = container_of(b, struct rules_def, link);
+	int ret = -1;
+	int64_t th_diff = 0;
+
+
+	if (ra->rule_ops.mode == rb->rule_ops.mode) {
+		if (ops_equal(ra->rule_ops.op, rb->rule_ops.op)) {
+			if ((ra->rule_ops.op == OP_LT) ||
+				(ra->rule_ops.op == OP_LE)) {
+				th_diff = ra->rule_ops.thresh -
+						rb->rule_ops.thresh;
+				if (th_diff > 0)
+					ret = 1;
+				else
+					ret = -1;
+			} else if ((ra->rule_ops.op == OP_GT) ||
+					(ra->rule_ops.op == OP_GE)) {
+				th_diff = rb->rule_ops.thresh -
+						ra->rule_ops.thresh;
+				if (th_diff > 0)
+					ret = 1;
+				else
+					ret = -1;
+			}
+		} else {
+			ret = ra->rule_ops.op - rb->rule_ops.op;
+		}
+	} else if (is_throttle_rule(ra->rule_ops.mode) &&
+				is_throttle_rule(rb->rule_ops.mode)) {
+		if (ra->rule_ops.mode == THROTTLE_ON)
+			ret = -1;
+		else
+			ret = 1;
+	} else if ((ra->rule_ops.mode == THROTTLE_OFF) &&
+		is_throttle_rule(rb->rule_ops.mode)) {
+		ret = 1;
+	} else if (is_throttle_rule(ra->rule_ops.mode) &&
+		(rb->rule_ops.mode == THROTTLE_OFF)) {
+		ret = -1;
+	}
+
+	return ret;
+}
+
+/*
+ * Log one node's rule list (ids, sources, destinations, thresholds and
+ * states) to the kernel log. Caller must hold msm_bus_rules_lock.
+ */
+static void print_rules(struct rule_node_info *node_it)
+{
+	struct rules_def *node_rule = NULL;
+	int i;
+
+	if (!node_it) {
+		/* Fixed garbled message ("no node for found"). */
+		pr_err("%s: no node found\n", __func__);
+		return;
+	}
+
+	pr_info("\n Now printing rules for Node %d cur rule %d\n",
+		node_it->id,
+		(node_it->cur_rule ? node_it->cur_rule->rule_id : -1));
+	list_for_each_entry(node_rule, &node_it->node_rules, link) {
+		pr_info("\n num Rules %d rule Id %d\n",
+			node_it->num_rules, node_rule->rule_id);
+		pr_info("Rule: src_field %d\n", node_rule->rule_ops.src_field);
+		for (i = 0; i < node_rule->rule_ops.num_src; i++)
+			pr_info("Rule: src %d\n",
+				node_rule->rule_ops.src_id[i]);
+		for (i = 0; i < node_rule->rule_ops.num_dst; i++)
+			pr_info("Rule: dst %d dst_bw %llu\n",
+				node_rule->rule_ops.dst_node[i],
+				node_rule->rule_ops.dst_bw);
+		pr_info("Rule: thresh %llu op %d mode %d State %d\n",
+			node_rule->rule_ops.thresh,
+			node_rule->rule_ops.op,
+			node_rule->rule_ops.mode,
+			node_rule->state);
+	}
+}
+
+/* Dump every registered rule node to the kernel log. */
+void print_all_rules(void)
+{
+	struct rule_node_info *node;
+
+	mutex_lock(&msm_bus_rules_lock);
+	list_for_each_entry(node, &node_list, link)
+		print_rules(node);
+	mutex_unlock(&msm_bus_rules_lock);
+}
+
+/*
+ * Render every rule node into @buf (at most @max_buf bytes) for debugfs
+ * style consumers. scnprintf() guarantees no overflow; once the buffer is
+ * full the remaining writes become no-ops.
+ */
+void print_rules_buf(char *buf, int max_buf)
+{
+	struct rule_node_info *node_it = NULL;
+	struct rules_def *node_rule = NULL;
+	int i;
+	int cnt = 0;
+
+	mutex_lock(&msm_bus_rules_lock);
+	list_for_each_entry(node_it, &node_list, link) {
+		cnt += scnprintf(buf + cnt, max_buf - cnt,
+			"\n Now printing rules for Node %d cur_rule %d\n",
+			node_it->id,
+			(node_it->cur_rule ? node_it->cur_rule->rule_id : -1));
+		list_for_each_entry(node_rule, &node_it->node_rules, link) {
+			cnt += scnprintf(buf + cnt, max_buf - cnt,
+				"\nNum Rules:%d ruleId %d STATE:%d change:%d\n",
+				node_it->num_rules, node_rule->rule_id,
+				node_rule->state, node_rule->state_change);
+			cnt += scnprintf(buf + cnt, max_buf - cnt,
+				"Src_field %d\n",
+				node_rule->rule_ops.src_field);
+			for (i = 0; i < node_rule->rule_ops.num_src; i++)
+				cnt += scnprintf(buf + cnt, max_buf - cnt,
+					"Src %d Cur Ib %llu Ab %llu\n",
+					node_rule->rule_ops.src_id[i],
+					node_rule->src_info[i].ib,
+					node_rule->src_info[i].ab);
+			/* Fix: print each destination, not dst_node[0]. */
+			for (i = 0; i < node_rule->rule_ops.num_dst; i++)
+				cnt += scnprintf(buf + cnt, max_buf - cnt,
+					"Dst %d dst_bw %llu\n",
+					node_rule->rule_ops.dst_node[i],
+					node_rule->rule_ops.dst_bw);
+			cnt += scnprintf(buf + cnt, max_buf - cnt,
+				"Thresh %llu op %d mode %d\n",
+				node_rule->rule_ops.thresh,
+				node_rule->rule_ops.op,
+				node_rule->rule_ops.mode);
+		}
+	}
+	mutex_unlock(&msm_bus_rules_lock);
+}
+
+/*
+ * Deep-copy a client-supplied rule into @node_rule.
+ *
+ * The shallow memcpy of @src also copies its src_id/dst_node pointers;
+ * these are replaced with private allocations so the caller may free its
+ * own copy. Notifier-based rules (@nb set) carry no destination array.
+ *
+ * Returns 0 or -ENOMEM. On failure all partial allocations are released,
+ * so the caller only needs to free @node_rule itself.
+ */
+static int copy_rule(struct bus_rule_type *src, struct rules_def *node_rule,
+		struct notifier_block *nb)
+{
+	int i;
+
+	memcpy(&node_rule->rule_ops, src,
+		sizeof(struct bus_rule_type));
+	node_rule->rule_ops.src_id = kzalloc(
+			(sizeof(int) * node_rule->rule_ops.num_src),
+			GFP_KERNEL);
+	if (!node_rule->rule_ops.src_id) {
+		pr_err("%s:Failed to allocate for src_id\n",
+				__func__);
+		return -ENOMEM;
+	}
+	memcpy(node_rule->rule_ops.src_id, src->src_id,
+		sizeof(int) * src->num_src);
+
+	if (!nb) {
+		node_rule->rule_ops.dst_node = kzalloc(
+			(sizeof(int) * node_rule->rule_ops.num_dst),
+			GFP_KERNEL);
+		if (!node_rule->rule_ops.dst_node)
+			goto err_free_src_id;
+		memcpy(node_rule->rule_ops.dst_node, src->dst_node,
+			sizeof(int) * src->num_dst);
+	}
+
+	node_rule->num_src = src->num_src;
+	node_rule->src_info = kzalloc(
+		(sizeof(struct node_vote_info) * node_rule->rule_ops.num_src),
+		GFP_KERNEL);
+	if (!node_rule->src_info) {
+		/* Fixed copy/paste error message (said "src_id"). */
+		pr_err("%s:Failed to allocate for src_info\n",
+				__func__);
+		goto err_free_dst_node;
+	}
+	for (i = 0; i < src->num_src; i++)
+		node_rule->src_info[i].id = src->src_id[i];
+
+	return 0;
+
+err_free_dst_node:
+	/* Fix: earlier allocations leaked on a later -ENOMEM. */
+	if (!nb)
+		kfree(node_rule->rule_ops.dst_node);
+err_free_src_id:
+	kfree(node_rule->rule_ops.src_id);
+	return -ENOMEM;
+}
+
+/*
+ * Attach @num_rules rules to their destination nodes (or to the shared
+ * NB_ID node when @nb is given) and keep each node's list priority-sorted.
+ * Returns true on full success. Caller must hold msm_bus_rules_lock.
+ *
+ * NOTE(review): list_sort() and the notifier registration run after the
+ * loops and only on the *last* node touched — rules spanning multiple
+ * destination nodes leave the earlier nodes unsorted; confirm intent.
+ * NOTE(review): node_rule is leaked when copy_rule() fails, and if every
+ * rule has num_dst == 0 the loop never assigns node, so the trailing
+ * list_sort() would dereference NULL.
+ */
+static bool __rule_register(int num_rules, struct bus_rule_type *rule,
+					struct notifier_block *nb)
+{
+	struct rule_node_info *node = NULL;
+	int i, j;
+	struct rules_def *node_rule = NULL;
+	int num_dst = 0;
+	bool reg_success = true;
+
+	if (num_rules <= 0)
+		return false;
+
+	for (i = 0; i < num_rules; i++) {
+		if (nb)
+			num_dst = 1;
+		else
+			num_dst = rule[i].num_dst;
+
+		for (j = 0; j < num_dst; j++) {
+			int id = 0;
+
+			if (nb)
+				id = NB_ID;
+			else
+				id = rule[i].dst_node[j];
+
+			node = gen_node(id, nb);
+			if (!node) {
+				pr_info("Error getting rule\n");
+				reg_success = false;
+				goto exit_rule_register;
+			}
+			node_rule = kzalloc(sizeof(struct rules_def),
+						GFP_KERNEL);
+			if (!node_rule) {
+				reg_success = false;
+				goto exit_rule_register;
+			}
+
+			if (copy_rule(&rule[i], node_rule, nb)) {
+				pr_err("Error copying rule\n");
+				reg_success = false;
+				goto exit_rule_register;
+			}
+
+			node_rule->rule_id = node->num_rules++;
+			if (nb)
+				node->data = nb;
+
+			list_add_tail(&node_rule->link, &node->node_rules);
+		}
+	}
+	list_sort(NULL, &node->node_rules, node_rules_compare);
+	if (nb && nb != node->rule_notify_list.head)
+		raw_notifier_chain_register(&node->rule_notify_list, nb);
+exit_rule_register:
+	return reg_success;
+}
+
+/*
+ * Compare two rules for identity (sources, destinations, bandwidth,
+ * operator and threshold). Returns 0 when they match, non-zero otherwise.
+ * Note: differing num_src leaves ret at the initial 1 (mismatch).
+ */
+static int comp_rules(struct bus_rule_type *rulea, struct bus_rule_type *ruleb)
+{
+	int ret = 1;
+
+	if (rulea->num_src == ruleb->num_src)
+		ret = memcmp(rulea->src_id, ruleb->src_id,
+			(sizeof(int) * rulea->num_src));
+	if (!ret && (rulea->num_dst == ruleb->num_dst))
+		ret = memcmp(rulea->dst_node, ruleb->dst_node,
+			(sizeof(int) * rulea->num_dst));
+	if (ret || (rulea->dst_bw != ruleb->dst_bw) ||
+		(rulea->op != ruleb->op) || (rulea->thresh != ruleb->thresh))
+		ret = 1;
+	return ret;
+}
+
+/* Public, locked wrapper around __rule_register(). */
+void msm_rule_register(int num_rules, struct bus_rule_type *rule,
+			struct notifier_block *nb)
+{
+	if (num_rules <= 0 || !rule)
+		return;
+
+	mutex_lock(&msm_bus_rules_lock);
+	__rule_register(num_rules, rule, nb);
+	mutex_unlock(&msm_bus_rules_lock);
+}
+
+/*
+ * Remove matching rules, unregister the notifier when its node empties,
+ * and reap any rule nodes left with no rules. Returns true if at least
+ * one rule was removed. Caller must hold msm_bus_rules_lock.
+ *
+ * NOTE(review): in the notifier branch i stays 0, so only rule[0] is
+ * compared regardless of @num_rules; match_found is also preset to true
+ * before any comparison — confirm both are intentional.
+ */
+static bool __rule_unregister(int num_rules, struct bus_rule_type *rule,
+					struct notifier_block *nb)
+{
+	int i = 0;
+	struct rule_node_info *node = NULL;
+	struct rule_node_info *node_tmp = NULL;
+	struct rules_def *node_rule;
+	struct rules_def *node_rule_tmp;
+	bool match_found = false;
+
+	if (num_rules <= 0)
+		return false;
+
+	if (nb) {
+		node = get_node(NB_ID, nb);
+		if (!node) {
+			pr_err("%s: Can't find node\n", __func__);
+			goto exit_unregister_rule;
+		}
+		match_found = true;
+		list_for_each_entry_safe(node_rule, node_rule_tmp,
+					&node->node_rules, link) {
+			if (comp_rules(&node_rule->rule_ops,
+					&rule[i]) == 0) {
+				list_del(&node_rule->link);
+				kfree(node_rule);
+				match_found = true;
+				node->num_rules--;
+				list_sort(NULL,
+					&node->node_rules,
+					node_rules_compare);
+				break;
+			}
+		}
+		if (!node->num_rules)
+			raw_notifier_chain_unregister(
+					&node->rule_notify_list, nb);
+	} else {
+		for (i = 0; i < num_rules; i++) {
+			match_found = false;
+
+			list_for_each_entry(node, &node_list, link) {
+				list_for_each_entry_safe(node_rule,
+				node_rule_tmp, &node->node_rules, link) {
+					if (comp_rules(&node_rule->rule_ops,
+						&rule[i]) != 0)
+						continue;
+					list_del(&node_rule->link);
+					kfree(node_rule);
+					match_found = true;
+					node->num_rules--;
+					list_sort(NULL,
+						&node->node_rules,
+						node_rules_compare);
+					break;
+				}
+			}
+		}
+	}
+
+	/* Reap nodes that no longer carry any rules. */
+	list_for_each_entry_safe(node, node_tmp,
+					&node_list, link) {
+		if (!node->num_rules) {
+			pr_debug("Deleting Rule node %d\n", node->id);
+			list_del(&node->link);
+			kfree(node);
+		}
+	}
+exit_unregister_rule:
+	return match_found;
+}
+
+/* Public, locked wrapper around __rule_unregister(). */
+void msm_rule_unregister(int num_rules, struct bus_rule_type *rule,
+				struct notifier_block *nb)
+{
+	if (num_rules <= 0 || !rule)
+		return;
+
+	mutex_lock(&msm_bus_rules_lock);
+	__rule_unregister(num_rules, rule, nb);
+	mutex_unlock(&msm_bus_rules_lock);
+}
+
+/*
+ * Atomically replace @old_rule with @new_rule under the rules lock.
+ * If registering the new rule fails, the old rule is re-registered on a
+ * best-effort basis. Returns false when the old rule was not found or
+ * the new rule could not be registered.
+ */
+bool msm_rule_update(struct bus_rule_type *old_rule,
+			struct bus_rule_type *new_rule,
+			struct notifier_block *nb)
+{
+	bool rc = true;
+
+	if (!old_rule || !new_rule)
+		return false;
+	mutex_lock(&msm_bus_rules_lock);
+	if (!__rule_unregister(1, old_rule, nb)) {
+		rc = false;
+		goto exit_rule_update;
+	}
+
+	if (!__rule_register(1, new_rule, nb)) {
+		/*
+		 * Registering new rule has failed for some reason, attempt
+		 * to re-register the old rule and return error.
+		 */
+		__rule_register(1, old_rule, nb);
+		rc = false;
+	}
+exit_rule_update:
+	mutex_unlock(&msm_bus_rules_lock);
+	return rc;
+}
+
+/*
+ * Force a rule re-evaluation for @node by issuing a throwaway zero-
+ * bandwidth vote on a temporary scaling client, which drives the commit
+ * path (and thus msm_rules_update_path()).
+ */
+void msm_rule_evaluate_rules(int node)
+{
+	struct msm_bus_client_handle *handle;
+
+	handle = msm_bus_scale_register(node, node, "tmp-rm", false);
+	if (!handle)
+		return;
+	msm_bus_scale_update_bw(handle, 0, 0);
+	msm_bus_scale_unregister(handle);
+}
+
+/* Return true when at least one rule node is currently registered. */
+bool msm_rule_are_rules_registered(void)
+{
+	bool ret;
+
+	mutex_lock(&msm_bus_rules_lock);
+	ret = !list_empty(&node_list);
+	mutex_unlock(&msm_bus_rules_lock);
+	return ret;
+}
+
diff --git a/drivers/soc/qcom/msm_bus/msm_buspm_coresight_adhoc.c b/drivers/soc/qcom/msm_bus/msm_buspm_coresight_adhoc.c
new file mode 100644
index 0000000..bedd2f9
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_buspm_coresight_adhoc.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/coresight.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/list.h>
+
+/* One coresight device name -> bus clock mapping, kept on drvdata->clocks. */
+struct msmbus_coresight_adhoc_clock_drvdata {
+	const char *csdev_name;
+	struct clk *clk;
+	struct list_head list;
+};
+
+/* Per-platform-device state, shared by all coresight nodes it registers. */
+struct msmbus_coresight_adhoc_drvdata {
+	struct device *dev;
+	struct coresight_device *csdev;
+	struct coresight_desc *desc;
+	struct list_head clocks;
+};
+
+/*
+ * Coresight source enable hook: find the clock registered for this
+ * csdev by name, set it to its lowest supported rate (round of 1 Hz)
+ * and enable it. Returns -ENOENT when no clock matches.
+ * NOTE(review): the clk_set_rate() return value is ignored.
+ */
+static int msmbus_coresight_enable_adhoc(struct coresight_device *csdev)
+{
+	struct msmbus_coresight_adhoc_clock_drvdata *clk;
+	struct msmbus_coresight_adhoc_drvdata *drvdata =
+		dev_get_drvdata(csdev->dev.parent);
+	long rate;
+
+	list_for_each_entry(clk, &drvdata->clocks, list) {
+		if (!strcmp(dev_name(&csdev->dev), clk->csdev_name)) {
+			rate = clk_round_rate(clk->clk, 1L);
+			clk_set_rate(clk->clk, rate);
+			return clk_prepare_enable(clk->clk);
+		}
+	}
+
+	return -ENOENT;
+}
+
+/* Coresight source disable hook: drop the clock matched by device name. */
+static void msmbus_coresight_disable_adhoc(struct coresight_device *csdev)
+{
+	struct msmbus_coresight_adhoc_clock_drvdata *clk;
+	struct msmbus_coresight_adhoc_drvdata *drvdata =
+		dev_get_drvdata(csdev->dev.parent);
+
+	list_for_each_entry(clk, &drvdata->clocks, list) {
+		if (!strcmp(dev_name(&csdev->dev), clk->csdev_name))
+			clk_disable_unprepare(clk->clk);
+	}
+}
+
+/* Coresight framework callbacks: this driver is a pure source device. */
+static const struct coresight_ops_source msmbus_coresight_adhoc_source_ops = {
+	.enable		= msmbus_coresight_enable_adhoc,
+	.disable	= msmbus_coresight_disable_adhoc,
+};
+
+static const struct coresight_ops msmbus_coresight_cs_ops = {
+	.source_ops	= &msmbus_coresight_adhoc_source_ops,
+};
+
+/*
+ * Tear down everything msmbus_coresight_init_adhoc() created: disable
+ * clocks, unregister the coresight device and release the clock list,
+ * descriptor and drvdata.
+ */
+void msmbus_coresight_remove_adhoc(struct platform_device *pdev)
+{
+	struct msmbus_coresight_adhoc_clock_drvdata *clk, *next_clk;
+	struct msmbus_coresight_adhoc_drvdata *drvdata =
+		platform_get_drvdata(pdev);
+
+	msmbus_coresight_disable_adhoc(drvdata->csdev);
+	coresight_unregister(drvdata->csdev);
+	list_for_each_entry_safe(clk, next_clk, &drvdata->clocks, list) {
+		list_del(&clk->list);
+		devm_kfree(&pdev->dev, clk);
+	}
+	devm_kfree(&pdev->dev, drvdata->desc);
+	devm_kfree(&pdev->dev, drvdata);
+	platform_set_drvdata(pdev, NULL);
+}
+EXPORT_SYMBOL(msmbus_coresight_remove_adhoc);
+
+/*
+ * Look up the "bus_clk" clock for a coresight DT node and record it on
+ * drvdata->clocks keyed by @name. Returns 0, -ENOMEM, or -EINVAL when
+ * the clock lookup fails.
+ * NOTE(review): the PTR_ERR() from of_clk_get_by_name() is discarded in
+ * favour of a flat -EINVAL — probe-defer (-EPROBE_DEFER) is lost.
+ */
+static int buspm_of_get_clk_adhoc(struct device_node *of_node,
+	struct msmbus_coresight_adhoc_drvdata *drvdata, const char *name)
+{
+	struct msmbus_coresight_adhoc_clock_drvdata *clk;
+
+	clk = devm_kzalloc(drvdata->dev, sizeof(*clk), GFP_KERNEL);
+
+	if (!clk)
+		return -ENOMEM;
+
+	clk->csdev_name = name;
+
+	clk->clk = of_clk_get_by_name(of_node, "bus_clk");
+	if (IS_ERR(clk->clk)) {
+		pr_err("Error: unable to get clock for coresight node %s\n",
+			name);
+		goto err;
+	}
+
+	list_add(&clk->list, &drvdata->clocks);
+	return 0;
+
+err:
+	devm_kfree(drvdata->dev, clk);
+	return -EINVAL;
+}
+
+/*
+ * Register one coresight source for @of_node. May be called multiple
+ * times per platform device; drvdata is created on the first call and
+ * reused (accumulating clocks) afterwards.
+ *
+ * NOTE(review): the err1 path frees drvdata and clears pdev's drvdata
+ * even when drvdata was created by an earlier *successful* call — a
+ * later failure would leave previously registered csdevs pointing at
+ * freed state; confirm and scope the cleanup to first-call failures.
+ */
+int msmbus_coresight_init_adhoc(struct platform_device *pdev,
+		struct device_node *of_node)
+{
+	int ret;
+	struct device *dev = &pdev->dev;
+	struct coresight_platform_data *pdata;
+	struct msmbus_coresight_adhoc_drvdata *drvdata;
+	struct coresight_desc *desc;
+
+	pdata = of_get_coresight_platform_data(dev, of_node);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+
+	drvdata = platform_get_drvdata(pdev);
+	if (IS_ERR_OR_NULL(drvdata)) {
+		drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+		if (!drvdata)
+			return -ENOMEM;
+		INIT_LIST_HEAD(&drvdata->clocks);
+		drvdata->dev = &pdev->dev;
+		platform_set_drvdata(pdev, drvdata);
+	}
+	ret = buspm_of_get_clk_adhoc(of_node, drvdata, pdata->name);
+	if (ret) {
+		pr_err("Error getting clocks\n");
+		ret = -ENXIO;
+		goto err1;
+	}
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_BUS;
+	desc->ops = &msmbus_coresight_cs_ops;
+	desc->pdata = pdata;
+	desc->dev = &pdev->dev;
+	drvdata->desc = desc;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev)) {
+		pr_err("coresight: Coresight register failed\n");
+		ret = PTR_ERR(drvdata->csdev);
+		goto err0;
+	}
+
+	return 0;
+err0:
+	devm_kfree(dev, desc);
+err1:
+	devm_kfree(dev, drvdata);
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+EXPORT_SYMBOL(msmbus_coresight_init_adhoc);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM BusPM Adhoc CoreSight Driver");
diff --git a/drivers/soc/qcom/msm_tz_smmu.c b/drivers/soc/qcom/msm_tz_smmu.c
new file mode 100644
index 0000000..ccf4036
--- /dev/null
+++ b/drivers/soc/qcom/msm_tz_smmu.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/msm_tz_smmu.h>
+
+/*
+ * DT "qcom,tz-device-id" strings indexed by enum tz_smmu_device_id.
+ * NOTE(review): assumes the enum range [TZ_DEVICE_START, TZ_DEVICE_MAX)
+ * is dense — a gap would leave a NULL entry that strcmp() would deref.
+ */
+static const char * const device_id_mappings[] = {
+	[TZ_DEVICE_VIDEO] = "VIDEO",
+	[TZ_DEVICE_MDSS] = "MDSS",
+	[TZ_DEVICE_LPASS] = "LPASS",
+	[TZ_DEVICE_MDSS_BOOT] = "MDSS_BOOT",
+	[TZ_DEVICE_USB1_HS] = "USB1_HS",
+	[TZ_DEVICE_OCMEM] = "OCMEM",
+	[TZ_DEVICE_LPASS_CORE] = "LPASS_CORE",
+	[TZ_DEVICE_VPU] = "VPU",
+	[TZ_DEVICE_COPSS_SMMU] = "COPSS_SMMU",
+	[TZ_DEVICE_USB3_0] = "USB3_0",
+	[TZ_DEVICE_USB3_1] = "USB3_1",
+	[TZ_DEVICE_PCIE_0] = "PCIE_0",
+	[TZ_DEVICE_PCIE_1] = "PCIE_1",
+	[TZ_DEVICE_BCSS] = "BCSS",
+	[TZ_DEVICE_VCAP] = "VCAP",
+	[TZ_DEVICE_PCIE20] = "PCIE20",
+	[TZ_DEVICE_IPA] = "IPA",
+	[TZ_DEVICE_APPS] = "APPS",
+	[TZ_DEVICE_GPU] = "GPU",
+	[TZ_DEVICE_UFS] = "UFS",
+	[TZ_DEVICE_ICE] = "ICE",
+	[TZ_DEVICE_ROT] = "ROT",
+	[TZ_DEVICE_VFE] = "VFE",
+	[TZ_DEVICE_ANOC0] = "ANOC0",
+	[TZ_DEVICE_ANOC1] = "ANOC1",
+	[TZ_DEVICE_ANOC2] = "ANOC2",
+	[TZ_DEVICE_CPP] = "CPP",
+	[TZ_DEVICE_JPEG] = "JPEG",
+};
+
+#define MAX_DEVICE_ID_NAME_LEN 20
+
+/* SCM service id and operation codes for TZ-assisted ATOS. */
+#define TZ_SMMU_PREPARE_ATOS_ID 0x21
+#define TZ_SMMU_ATOS_START 1
+#define TZ_SMMU_ATOS_END 0
+
+#define SMMU_CHANGE_PAGETABLE_FORMAT 0X01
+
+/*
+ * Map a device's "qcom,tz-device-id" DT string to its TZ device id.
+ * Returns TZ_DEVICE_MAX when the property is absent or unrecognised.
+ */
+enum tz_smmu_device_id msm_dev_to_device_id(struct device *dev)
+{
+	const char *device_id;
+	enum tz_smmu_device_id iter;
+
+	if (of_property_read_string(dev->of_node, "qcom,tz-device-id",
+			&device_id)) {
+		/* Fix: message now names the property actually read. */
+		dev_err(dev, "no qcom,tz-device-id property\n");
+		return TZ_DEVICE_MAX;
+	}
+
+	for (iter = TZ_DEVICE_START; iter < TZ_DEVICE_MAX; iter++)
+		if (!strcmp(device_id_mappings[iter], device_id))
+			return iter;
+
+	return TZ_DEVICE_MAX;
+}
+
+/*
+ * Issue the TZ_SMMU_PREPARE_ATOS_ID SCM call for @dev's TZ device id:
+ * @operation is TZ_SMMU_ATOS_START or TZ_SMMU_ATOS_END for context bank
+ * @cb_num. Returns -ENODEV when the device has no TZ id, else the SCM
+ * call status.
+ */
+static int __msm_tz_smmu_atos(struct device *dev, int cb_num, int operation)
+{
+	int ret;
+	struct scm_desc desc = {0};
+	enum tz_smmu_device_id devid = msm_dev_to_device_id(dev);
+
+	if (devid == TZ_DEVICE_MAX)
+		return -ENODEV;
+
+	desc.args[0] = devid;
+	desc.args[1] = cb_num;
+	desc.args[2] = operation;
+	desc.arginfo = SCM_ARGS(3, SCM_VAL, SCM_VAL, SCM_VAL);
+
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP, TZ_SMMU_PREPARE_ATOS_ID),
+			&desc);
+	if (ret)
+		pr_info("%s: TZ SMMU ATOS %s failed, ret = %d\n",
+			__func__,
+			operation == TZ_SMMU_ATOS_START ? "start" : "end",
+			ret);
+	return ret;
+}
+
+/* Start a TZ-assisted address translation (ATOS) on context bank @cb_num. */
+int msm_tz_smmu_atos_start(struct device *dev, int cb_num)
+{
+	return __msm_tz_smmu_atos(dev, cb_num, TZ_SMMU_ATOS_START);
+}
+
+/* End a TZ-assisted address translation (ATOS) on context bank @cb_num. */
+int msm_tz_smmu_atos_end(struct device *dev, int cb_num)
+{
+	return __msm_tz_smmu_atos(dev, cb_num, TZ_SMMU_ATOS_END);
+}
+
+/*
+ * Ask TZ to switch the page table format of context bank @cbndx for
+ * secure device @sec_id (args[2] = 1 enables the new format). Warns and
+ * propagates the SCM error on failure; returns 0 on success.
+ */
+int msm_tz_set_cb_format(enum tz_smmu_device_id sec_id, int cbndx)
+{
+	struct scm_desc desc = {0};
+	int ret = 0;
+
+	desc.args[0] = sec_id;
+	desc.args[1] = cbndx;
+	desc.args[2] = 1;	/* Enable */
+	desc.arginfo = SCM_ARGS(3, SCM_VAL, SCM_VAL, SCM_VAL);
+
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
+			SMMU_CHANGE_PAGETABLE_FORMAT), &desc);
+
+	if (ret) {
+		WARN(1, "Format change failed for CB %d with ret %d\n",
+			cbndx, ret);
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 733be16..cb12a67 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -47,7 +47,7 @@
};
static void *qcom_secure_mem;
-#define QCOM_SECURE_MEM_SIZE (512*1024)
+#define QCOM_SECURE_MEM_SIZE (2048*1024)
static int secure_buffer_change_chunk(u32 chunks,
u32 nchunks,
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index c4004b0d..44e977c 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -55,7 +55,7 @@
#define setup_timeout(dest_ss, source_ss, comm_type) \
_setup_timeout(dest_ss, source_ss, comm_type)
-#define cancel_timeout(subsys) del_timer(&subsys->timeout_data.timer)
+#define cancel_timeout(subsys) del_timer_sync(&subsys->timeout_data.timer)
#define init_subsys_timer(subsys) _init_subsys_timer(subsys)
/* Timeout values */
@@ -564,7 +564,6 @@
return;
}
- timeout_data->timer.data = (unsigned long) timeout_data;
timeout_data->comm_type = comm_type;
timeout = jiffies + msecs_to_jiffies(timeout_vals[comm_type]);
mod_timer(&timeout_data->timer, timeout);
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index 81a274b..17af8ae 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -396,10 +396,10 @@
}
}
-static void pet_task_wakeup(unsigned long data)
+static void pet_task_wakeup(struct timer_list *t)
{
struct msm_watchdog_data *wdog_dd =
- (struct msm_watchdog_data *)data;
+ from_timer(wdog_dd, t, pet_timer);
wdog_dd->timer_expired = true;
wdog_dd->timer_fired = sched_clock();
wake_up(&wdog_dd->pet_complete);
@@ -703,9 +703,7 @@
wdog_dd->user_pet_complete = true;
wdog_dd->user_pet_enabled = false;
wake_up_process(wdog_dd->watchdog_task);
- init_timer(&wdog_dd->pet_timer);
- wdog_dd->pet_timer.data = (unsigned long)wdog_dd;
- wdog_dd->pet_timer.function = pet_task_wakeup;
+ timer_setup(&wdog_dd->pet_timer, pet_task_wakeup, 0);
wdog_dd->pet_timer.expires = jiffies + delay_time;
add_timer(&wdog_dd->pet_timer);
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 360b821..3d547f9 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2015, 2017-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1325,6 +1325,7 @@
.driver = {
.name = "spmi_pmic_arb",
.of_match_table = spmi_pmic_arb_match_table,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
module_platform_driver(spmi_pmic_arb_driver);
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index d72e7d7..f4308941 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -55,3 +55,15 @@
We generally don't want to enable this config as it breaks the
cache maintenance model.
If you're not sure say N here.
+
+config ION_DEFER_FREE_NO_SCHED_IDLE
+ bool "Increases the priority of ION defer free thread"
+ depends on ION
+ help
+ Certain heaps such as the system heaps make use of a low priority
+ thread to help free buffer allocations back to the pool which might
+ result in future allocations requests going to the buddy instead of
+ the pool when there is a high buffer allocation rate.
+ Choose this option to remove the SCHED_IDLE flag in case of defer
+ free thereby increasing the priority of defer free thread.
+	  If you're not sure, say Y here.
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 6bc6f34..f116a64 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -119,6 +119,9 @@
if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
goto err2;
+ if (ret == -EINTR)
+ goto err2;
+
ion_heap_freelist_drain(heap, 0);
ret = heap->ops->allocate(heap, buffer, len, flags);
if (ret)
@@ -217,7 +220,8 @@
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
if (buffer->kmap_cnt == 0) {
- WARN(1, "Call dma_buf_begin_cpu_access before dma_buf_end_cpu_access\n");
+ pr_warn_ratelimited("Call dma_buf_begin_cpu_access before dma_buf_end_cpu_access, pid:%d\n",
+ current->pid);
return;
}
@@ -547,8 +551,10 @@
sg_dma_addr = sg_dma_address(sg);
len += sg->length;
- if (len <= offset)
+ if (len <= offset) {
+ sg_dma_addr += sg->length;
continue;
+ }
sg_left = len - offset;
sg_offset = sg->length - sg_left;
@@ -1052,7 +1058,7 @@
if (!((1 << heap->id) & heap_id_mask))
continue;
buffer = ion_buffer_create(heap, dev, len, flags);
- if (!IS_ERR(buffer))
+ if (!IS_ERR(buffer) || PTR_ERR(buffer) == -EINTR)
break;
}
up_read(&dev->lock);
@@ -1289,12 +1295,3 @@
return idev;
}
EXPORT_SYMBOL(ion_device_create);
-
-void ion_device_destroy(struct ion_device *dev)
-{
- misc_deregister(&dev->dev);
- debugfs_remove_recursive(dev->debug_root);
- /* XXX need to free the heaps and clients ? */
- kfree(dev);
-}
-EXPORT_SYMBOL(ion_device_destroy);
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index 3993ed7..a9aed00 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -29,6 +29,7 @@
#define ION_SYSTEM_HEAP_NAME "system"
#define ION_MM_HEAP_NAME "mm"
#define ION_SPSS_HEAP_NAME "spss"
+#define ION_SECURE_CARVEOUT_HEAP_NAME "secure_carveout"
#define ION_QSECOM_HEAP_NAME "qsecom"
#define ION_QSECOM_TA_HEAP_NAME "qsecom_ta"
#define ION_SECURE_HEAP_NAME "secure_heap"
@@ -256,12 +257,6 @@
struct ion_device *ion_device_create(void);
/**
- * ion_device_destroy - free and device and it's resource
- * @dev: the device
- */
-void ion_device_destroy(struct ion_device *dev);
-
-/**
* ion_device_add_heap - adds a heap to the ion device
* @dev: the device
* @heap: the heap to add
@@ -357,25 +352,19 @@
*/
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data);
-void ion_heap_destroy(struct ion_heap *heap);
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused);
-void ion_system_heap_destroy(struct ion_heap *heap);
struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *heap);
-void ion_system_contig_heap_destroy(struct ion_heap *heap);
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data);
-void ion_carveout_heap_destroy(struct ion_heap *heap);
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data);
-void ion_chunk_heap_destroy(struct ion_heap *heap);
#ifdef CONFIG_CMA
struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data);
void ion_secure_cma_heap_destroy(struct ion_heap *heap);
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data);
-void ion_cma_heap_destroy(struct ion_heap *heap);
#else
static inline struct ion_heap
*ion_secure_cma_heap_create(struct ion_platform_heap *h)
@@ -389,15 +378,14 @@
{
return NULL;
}
-
-static inline void ion_cma_heap_destroy(struct ion_heap *h) {}
#endif
struct ion_heap *ion_system_secure_heap_create(struct ion_platform_heap *heap);
-void ion_system_secure_heap_destroy(struct ion_heap *heap);
struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *heap);
-void ion_cma_secure_heap_destroy(struct ion_heap *heap);
+
+struct ion_heap *
+ion_secure_carveout_heap_create(struct ion_platform_heap *heap);
/**
* functions for creating and destroying a heap pool -- allows you
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index d59ee9f..9e2a44f 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -13,7 +13,12 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <soc/qcom/secure_buffer.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/msm_ion.h>
#include "ion.h"
+#include "ion_secure_util.h"
#define ION_CARVEOUT_ALLOCATE_FAIL -1
@@ -113,7 +118,9 @@
.unmap_kernel = ion_heap_unmap_kernel,
};
-struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+static struct ion_heap *
+__ion_carveout_heap_create(struct ion_platform_heap *heap_data,
+ bool sync)
{
struct ion_carveout_heap *carveout_heap;
int ret;
@@ -125,7 +132,8 @@
page = pfn_to_page(PFN_DOWN(heap_data->base));
size = heap_data->size;
- ion_pages_sync_for_device(dev, page, size, DMA_BIDIRECTIONAL);
+ if (sync)
+ ion_pages_sync_for_device(dev, page, size, DMA_BIDIRECTIONAL);
ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
if (ret)
@@ -150,7 +158,12 @@
return &carveout_heap->heap;
}
-void ion_carveout_heap_destroy(struct ion_heap *heap)
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+{
+ return __ion_carveout_heap_create(heap_data, true);
+}
+
+static void ion_carveout_heap_destroy(struct ion_heap *heap)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
@@ -159,3 +172,187 @@
kfree(carveout_heap);
carveout_heap = NULL;
}
+
+struct ion_sc_entry {
+ struct list_head list;
+ struct ion_heap *heap;
+ u32 token;
+};
+
+struct ion_sc_heap {
+ struct ion_heap heap;
+ struct device *dev;
+ struct list_head children;
+};
+
+static struct ion_heap *ion_sc_find_child(struct ion_heap *heap, u32 flags)
+{
+ struct ion_sc_heap *manager;
+ struct ion_sc_entry *entry;
+
+ manager = container_of(heap, struct ion_sc_heap, heap);
+ flags = flags & ION_FLAGS_CP_MASK;
+ list_for_each_entry(entry, &manager->children, list) {
+ if (entry->token == flags)
+ return entry->heap;
+ }
+ return NULL;
+}
+
+static int ion_sc_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer, unsigned long len,
+ unsigned long flags)
+{
+ struct ion_heap *child;
+
+ /* cache maintenance is not possible on secure memory */
+ flags &= ~((unsigned long)ION_FLAG_CACHED);
+ buffer->flags = flags;
+
+ child = ion_sc_find_child(heap, flags);
+ if (!child)
+ return -EINVAL;
+ return ion_carveout_heap_allocate(child, buffer, len, flags);
+}
+
+static void ion_sc_heap_free(struct ion_buffer *buffer)
+{
+ struct ion_heap *child;
+ struct sg_table *table = buffer->sg_table;
+ struct page *page = sg_page(table->sgl);
+ phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ child = ion_sc_find_child(buffer->heap, buffer->flags);
+ if (!child) {
+ WARN(1, "ion_secure_carveout: invalid buffer flags on free. Memory will be leaked.\n");
+ return;
+ }
+
+ ion_carveout_free(child, paddr, buffer->size);
+ sg_free_table(table);
+ kfree(table);
+}
+
+static struct ion_heap_ops ion_sc_heap_ops = {
+ .allocate = ion_sc_heap_allocate,
+ .free = ion_sc_heap_free,
+};
+
+static int ion_sc_get_dt_token(struct ion_sc_entry *entry,
+ struct device_node *np, u64 base, u64 size)
+{
+ u32 token;
+ int ret = -EINVAL;
+
+ if (of_property_read_u32(np, "token", &token))
+ return -EINVAL;
+
+ ret = ion_hyp_assign_from_flags(base, size, token);
+ if (ret)
+ pr_err("secure_carveout_heap: Assign token 0x%x failed\n",
+ token);
+ else
+ entry->token = token;
+
+ return ret;
+}
+
+static int ion_sc_add_child(struct ion_sc_heap *manager,
+ struct device_node *np)
+{
+ struct device *dev = manager->dev;
+ struct ion_platform_heap heap_data = {0};
+ struct ion_sc_entry *entry;
+ struct device_node *phandle;
+ const __be32 *basep;
+ u64 base, size;
+ int ret;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&entry->list);
+
+ phandle = of_parse_phandle(np, "memory-region", 0);
+ if (!phandle)
+ goto out_free;
+
+ basep = of_get_address(phandle, 0, &size, NULL);
+ if (!basep)
+ goto out_free;
+
+ base = of_translate_address(phandle, basep);
+ if (base == OF_BAD_ADDR)
+ goto out_free;
+
+ heap_data.priv = dev;
+ heap_data.base = base;
+ heap_data.size = size;
+
+ /* This will zero memory initially */
+ entry->heap = __ion_carveout_heap_create(&heap_data, false);
+ if (IS_ERR(entry->heap))
+ goto out_free;
+
+ ret = ion_sc_get_dt_token(entry, np, base, size);
+ if (ret)
+ goto out_free_carveout;
+
+ list_add(&entry->list, &manager->children);
+ dev_info(dev, "ion_secure_carveout: creating heap@0x%llx, size 0x%llx\n",
+ base, size);
+ return 0;
+
+out_free_carveout:
+ ion_carveout_heap_destroy(entry->heap);
+out_free:
+ kfree(entry);
+ return -EINVAL;
+}
+
+static void ion_secure_carveout_heap_destroy(struct ion_heap *heap)
+{
+ struct ion_sc_heap *manager =
+ container_of(heap, struct ion_sc_heap, heap);
+ struct ion_sc_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &manager->children, list) {
+ ion_carveout_heap_destroy(entry->heap);
+ kfree(entry);
+ }
+ kfree(manager);
+}
+
+struct ion_heap *
+ion_secure_carveout_heap_create(struct ion_platform_heap *heap_data)
+{
+ struct device *dev = heap_data->priv;
+ int ret;
+ struct ion_sc_heap *manager;
+ struct device_node *np;
+
+ manager = kzalloc(sizeof(*manager), GFP_KERNEL);
+ if (!manager)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&manager->children);
+ manager->dev = dev;
+
+ for_each_child_of_node(dev->of_node, np) {
+ ret = ion_sc_add_child(manager, np);
+ if (ret) {
+ dev_err(dev, "Creating child pool %s failed\n",
+ np->name);
+ goto err;
+ }
+ }
+
+ manager->heap.ops = &ion_sc_heap_ops;
+ manager->heap.type = (enum ion_heap_type)ION_HEAP_TYPE_SECURE_CARVEOUT;
+ return &manager->heap;
+
+err:
+ ion_secure_carveout_heap_destroy(&manager->heap);
+ return ERR_PTR(-EINVAL);
+}
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 331ba10..1d452af 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -156,13 +156,3 @@
kfree(chunk_heap);
return ERR_PTR(ret);
}
-
-void ion_chunk_heap_destroy(struct ion_heap *heap)
-{
- struct ion_chunk_heap *chunk_heap =
- container_of(heap, struct ion_chunk_heap, heap);
-
- gen_pool_destroy(chunk_heap->pool);
- kfree(chunk_heap);
- chunk_heap = NULL;
-}
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index e8d7100..46bb469 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -135,17 +135,10 @@
return &cma_heap->heap;
}
-void ion_cma_heap_destroy(struct ion_heap *heap)
-{
- struct ion_cma_heap *cma_heap = to_cma_heap(heap);
-
- kfree(cma_heap);
-}
-
static void ion_secure_cma_free(struct ion_buffer *buffer)
{
if (ion_hyp_unassign_sg_from_flags(buffer->sg_table, buffer->flags,
- false))
+ true))
return;
ion_cma_free(buffer);
@@ -163,7 +156,7 @@
goto out;
}
- ret = ion_hyp_assign_sg_from_flags(buffer->sg_table, flags, false);
+ ret = ion_hyp_assign_sg_from_flags(buffer->sg_table, flags, true);
if (ret)
goto out_free_buf;
@@ -175,11 +168,34 @@
return ret;
}
+static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ if (!hlos_accessible_buffer(buffer)) {
+ pr_info("%s: Mapping non-HLOS accessible buffer disallowed\n",
+ __func__);
+ return NULL;
+ }
+ return ion_heap_map_kernel(heap, buffer);
+}
+
+static int ion_secure_cma_map_user(struct ion_heap *mapper,
+ struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ if (!hlos_accessible_buffer(buffer)) {
+ pr_info("%s: Mapping non-HLOS accessible buffer disallowed\n",
+ __func__);
+ return -EINVAL;
+ }
+ return ion_heap_map_user(mapper, buffer, vma);
+}
+
static struct ion_heap_ops ion_secure_cma_ops = {
.allocate = ion_secure_cma_allocate,
.free = ion_secure_cma_free,
- .map_user = ion_heap_map_user,
- .map_kernel = ion_heap_map_kernel,
+ .map_user = ion_secure_cma_map_user,
+ .map_kernel = ion_secure_cma_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
};
@@ -205,10 +221,3 @@
cma_heap->heap.type = (enum ion_heap_type)ION_HEAP_TYPE_HYP_CMA;
return &cma_heap->heap;
}
-
-void ion_cma_secure_heap_destroy(struct ion_heap *heap)
-{
- struct ion_cma_heap *cma_heap = to_cma_heap(heap);
-
- kfree(cma_heap);
-}
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index b2fdc91..dcee365 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -242,8 +242,9 @@
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
+#ifndef CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE
struct sched_param param = { .sched_priority = 0 };
-
+#endif
INIT_LIST_HEAD(&heap->free_list);
init_waitqueue_head(&heap->waitqueue);
heap->task = kthread_run(ion_heap_deferred_free, heap,
@@ -253,7 +254,9 @@
__func__);
return PTR_ERR_OR_ZERO(heap->task);
}
+#ifndef CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE
 sched_setscheduler(heap->task, SCHED_IDLE, &param);
+#endif
return 0;
}
@@ -341,6 +344,9 @@
case (enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE:
heap = ion_system_secure_heap_create(heap_data);
break;
+ case (enum ion_heap_type)ION_HEAP_TYPE_SECURE_CARVEOUT:
+ heap = ion_secure_carveout_heap_create(heap_data);
+ break;
default:
pr_err("%s: Invalid heap type %d\n", __func__,
heap_data->type);
@@ -360,45 +366,3 @@
return heap;
}
EXPORT_SYMBOL(ion_heap_create);
-
-void ion_heap_destroy(struct ion_heap *heap)
-{
- int heap_type;
-
- if (!heap)
- return;
-
- heap_type = heap->type;
- switch (heap_type) {
- case ION_HEAP_TYPE_SYSTEM_CONTIG:
- ion_system_contig_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_SYSTEM:
- ion_system_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_CARVEOUT:
- ion_carveout_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_CHUNK:
- ion_chunk_heap_destroy(heap);
- break;
-#ifdef CONFIG_CMA
- case ION_HEAP_TYPE_SECURE_DMA:
- ion_secure_cma_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_DMA:
- ion_cma_heap_destroy(heap);
- break;
- case (enum ion_heap_type)ION_HEAP_TYPE_HYP_CMA:
- ion_cma_secure_heap_destroy(heap);
- break;
-#endif
- case (enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE:
- ion_system_secure_heap_destroy(heap);
- break;
- default:
- pr_err("%s: Invalid heap type %d\n", __func__,
- heap->type);
- }
-}
-EXPORT_SYMBOL(ion_heap_destroy);
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 18dff37..3158035 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -8,6 +8,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/swap.h>
+#include <linux/sched/signal.h>
#include "ion.h"
@@ -64,6 +65,9 @@
BUG_ON(!pool);
+ if (fatal_signal_pending(current))
+ return ERR_PTR(-EINTR);
+
if (*from_pool && mutex_trylock(&pool->mutex)) {
if (pool->high_count)
page = ion_page_pool_remove(pool, true);
@@ -75,6 +79,9 @@
page = ion_page_pool_alloc_pages(pool);
*from_pool = false;
}
+
+ if (!page)
+ return ERR_PTR(-ENOMEM);
return page;
}
@@ -86,7 +93,7 @@
struct page *page = NULL;
if (!pool)
- return NULL;
+ return ERR_PTR(-EINVAL);
if (mutex_trylock(&pool->mutex)) {
if (pool->high_count)
@@ -96,6 +103,8 @@
mutex_unlock(&pool->mutex);
}
+ if (!page)
+ return ERR_PTR(-ENOMEM);
return page;
}
diff --git a/drivers/staging/android/ion/ion_secure_util.c b/drivers/staging/android/ion/ion_secure_util.c
index 57f0957..20214c0 100644
--- a/drivers/staging/android/ion/ion_secure_util.c
+++ b/drivers/staging/android/ion/ion_secure_util.c
@@ -20,7 +20,8 @@
vmid == VMID_CP_CAMERA_PREVIEW ||
vmid == VMID_CP_SPSS_SP ||
vmid == VMID_CP_SPSS_SP_SHARED ||
- vmid == VMID_CP_SPSS_HLOS_SHARED);
+ vmid == VMID_CP_SPSS_HLOS_SHARED ||
+ vmid == VMID_CP_CDSP);
}
int get_secure_vmid(unsigned long flags)
@@ -47,6 +48,8 @@
return VMID_CP_SPSS_SP_SHARED;
if (flags & ION_FLAG_CP_SPSS_HLOS_SHARED)
return VMID_CP_SPSS_HLOS_SHARED;
+ if (flags & ION_FLAG_CP_CDSP)
+ return VMID_CP_CDSP;
return -EINVAL;
}
@@ -229,3 +232,44 @@
return true;
}
+
+int ion_hyp_assign_from_flags(u64 base, u64 size, unsigned long flags)
+{
+ u32 *vmids, *modes;
+ u32 nr, i;
+ int ret = -EINVAL;
+ u32 src_vm = VMID_HLOS;
+
+ nr = count_set_bits(flags);
+ vmids = kcalloc(nr, sizeof(*vmids), GFP_KERNEL);
+ if (!vmids)
+ return -ENOMEM;
+
+ modes = kcalloc(nr, sizeof(*modes), GFP_KERNEL);
+ if (!modes) {
+ kfree(vmids);
+ return -ENOMEM;
+ }
+
+ if ((flags & ~ION_FLAGS_CP_MASK) ||
+ populate_vm_list(flags, vmids, nr)) {
+ pr_err("%s: Failed to parse secure flags 0x%lx\n", __func__,
+ flags);
+ goto out;
+ }
+
+ for (i = 0; i < nr; i++)
+ if (vmids[i] == VMID_CP_SEC_DISPLAY)
+ modes[i] = PERM_READ;
+ else
+ modes[i] = PERM_READ | PERM_WRITE;
+
+ ret = hyp_assign_phys(base, size, &src_vm, 1, vmids, modes, nr);
+ if (ret)
+ pr_err("%s: Assign call failed, flags 0x%lx\n", __func__, flags);
+
+out:
+ kfree(modes);
+ kfree(vmids);
+ return ret;
+}
diff --git a/drivers/staging/android/ion/ion_secure_util.h b/drivers/staging/android/ion/ion_secure_util.h
index 841c64c..8f04983 100644
--- a/drivers/staging/android/ion/ion_secure_util.h
+++ b/drivers/staging/android/ion/ion_secure_util.h
@@ -18,6 +18,7 @@
bool set_page_private);
int ion_hyp_assign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
bool set_page_private);
+int ion_hyp_assign_from_flags(u64 base, u64 size, unsigned long flags);
bool hlos_accessible_buffer(struct ion_buffer *buffer);
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index fc82d42..aa781f5 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -48,6 +48,11 @@
u32 size;
};
+int ion_heap_is_system_heap_type(enum ion_heap_type type)
+{
+ return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM);
+}
+
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
struct ion_buffer *buffer,
unsigned long order,
@@ -68,8 +73,8 @@
page = ion_page_pool_alloc(pool, from_pool);
- if (!page)
- return 0;
+ if (IS_ERR(page))
+ return page;
if ((MAKE_ION_ALLOC_DMA_READY && vmid <= 0) || !(*from_pool))
ion_pages_sync_for_device(dev, page, PAGE_SIZE << order,
@@ -120,7 +125,7 @@
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
- return NULL;
+ return ERR_PTR(-ENOMEM);
for (i = 0; i < NUM_ORDERS; i++) {
if (size < order_to_size(orders[i]))
@@ -129,7 +134,7 @@
continue;
from_pool = !(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC);
page = alloc_buffer_page(heap, buffer, orders[i], &from_pool);
- if (!page)
+ if (IS_ERR(page))
continue;
info->page = page;
@@ -140,7 +145,7 @@
}
kfree(info);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
static struct page_info *
@@ -155,7 +160,7 @@
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
- return NULL;
+ return ERR_PTR(-ENOMEM);
for (i = 0; i < NUM_ORDERS; i++) {
if (size < order_to_size(orders[i]))
@@ -164,7 +169,7 @@
continue;
page = alloc_from_secure_pool_order(heap, buffer, orders[i]);
- if (!page)
+ if (IS_ERR(page))
continue;
info->page = page;
@@ -175,7 +180,7 @@
}
page = split_page_from_secure_pool(heap, buffer);
- if (page) {
+ if (!IS_ERR(page)) {
info->page = page;
info->order = 0;
info->from_pool = true;
@@ -259,7 +264,7 @@
struct sg_table table_sync = {0};
struct scatterlist *sg;
struct scatterlist *sg_sync;
- int ret;
+ int ret = -ENOMEM;
struct list_head pages;
struct list_head pages_from_pool;
struct page_info *info, *tmp_info;
@@ -274,6 +279,13 @@
if (size / PAGE_SIZE > totalram_pages / 2)
return -ENOMEM;
+ if (ion_heap_is_system_heap_type(buffer->heap->type) &&
+ is_secure_vmid_valid(vmid)) {
+ pr_info("%s: System heap doesn't support secure allocations\n",
+ __func__);
+ return -EINVAL;
+ }
+
data.size = 0;
INIT_LIST_HEAD(&pages);
INIT_LIST_HEAD(&pages_from_pool);
@@ -288,8 +300,10 @@
size_remaining,
max_order);
- if (!info)
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto err;
+ }
sz = (1 << info->order) * PAGE_SIZE;
@@ -371,7 +385,7 @@
err_free_sg2:
/* We failed to zero buffers. Bypass pool */
- buffer->flags |= ION_PRIV_FLAG_SHRINKER_FREE;
+ buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
if (vmid > 0)
ion_hyp_unassign_sg(table, &vmid, 1, true);
@@ -396,7 +410,7 @@
free_buffer_page(sys_heap, buffer, info->page, info->order);
kfree(info);
}
- return -ENOMEM;
+ return ret;
}
void ion_system_heap_free(struct ion_buffer *buffer)
@@ -582,8 +596,10 @@
int i;
for (i = 0; i < NUM_ORDERS; i++)
- if (pools[i])
+ if (pools[i]) {
ion_page_pool_destroy(pools[i]);
+ pools[i] = NULL;
+ }
}
/**
@@ -654,26 +670,6 @@
return ERR_PTR(-ENOMEM);
}
-void ion_system_heap_destroy(struct ion_heap *heap)
-{
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
- int i, j;
-
- for (i = 0; i < VMID_LAST; i++) {
- if (!is_secure_vmid_valid(i))
- continue;
- for (j = 0; j < NUM_ORDERS; j++)
- ion_secure_page_pool_shrink(sys_heap, i, j, UINT_MAX);
-
- ion_system_heap_destroy_pools(sys_heap->secure_pools[i]);
- }
- ion_system_heap_destroy_pools(sys_heap->uncached_pools);
- ion_system_heap_destroy_pools(sys_heap->cached_pools);
- kfree(sys_heap);
-}
-
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long len,
@@ -754,8 +750,3 @@
heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
return heap;
}
-
-void ion_system_contig_heap_destroy(struct ion_heap *heap)
-{
- kfree(heap);
-}
diff --git a/drivers/staging/android/ion/ion_system_secure_heap.c b/drivers/staging/android/ion/ion_system_secure_heap.c
index 6d2d5ee..96e98e2f 100644
--- a/drivers/staging/android/ion/ion_system_secure_heap.c
+++ b/drivers/staging/android/ion/ion_system_secure_heap.c
@@ -98,8 +98,8 @@
int ret;
int vmid;
+ memset(&buffer, 0, sizeof(struct ion_buffer));
buffer.heap = sys_heap;
- buffer.flags = 0;
ret = sys_heap->ops->allocate(sys_heap, &buffer, info->size,
buffer.flags);
@@ -156,6 +156,7 @@
size_t pool_size, size;
int ret;
+ memset(&buffer, 0, sizeof(struct ion_buffer));
buffer.heap = sys_heap;
buffer.flags = info->vmid;
@@ -355,31 +356,6 @@
return &heap->heap;
}
-void ion_system_secure_heap_destroy(struct ion_heap *heap)
-{
- struct ion_system_secure_heap *secure_heap = container_of(heap,
- struct ion_system_secure_heap,
- heap);
- unsigned long flags;
- LIST_HEAD(items);
- struct prefetch_info *info, *tmp;
-
- /* Stop any pending/future work */
- spin_lock_irqsave(&secure_heap->work_lock, flags);
- secure_heap->destroy_heap = true;
- list_splice_init(&secure_heap->prefetch_list, &items);
- spin_unlock_irqrestore(&secure_heap->work_lock, flags);
-
- cancel_delayed_work_sync(&secure_heap->prefetch_work);
-
- list_for_each_entry_safe(info, tmp, &items, list) {
- list_del(&info->list);
- kfree(info);
- }
-
- kfree(heap);
-}
-
struct page *alloc_from_secure_pool_order(struct ion_system_heap *heap,
struct ion_buffer *buffer,
unsigned long order)
@@ -388,7 +364,7 @@
struct ion_page_pool *pool;
if (!is_secure_vmid_valid(vmid))
- return NULL;
+ return ERR_PTR(-EINVAL);
pool = heap->secure_pools[vmid][order_to_index(order)];
return ion_page_pool_alloc_pool_only(pool);
@@ -410,13 +386,13 @@
* possible.
*/
page = alloc_from_secure_pool_order(heap, buffer, 0);
- if (page)
+ if (!IS_ERR(page))
goto got_page;
for (i = NUM_ORDERS - 2; i >= 0; i--) {
order = orders[i];
page = alloc_from_secure_pool_order(heap, buffer, order);
- if (!page)
+ if (IS_ERR(page))
continue;
split_page(page, order);
@@ -426,7 +402,7 @@
* Return the remaining order-0 pages to the pool.
* SetPagePrivate flag to mark memory as secure.
*/
- if (page) {
+ if (!IS_ERR(page)) {
for (j = 1; j < (1 << order); j++) {
SetPagePrivate(page + j);
free_buffer_page(heap, buffer, page + j, 0);
@@ -454,7 +430,7 @@
while (freed < nr_to_scan) {
page = ion_page_pool_alloc_pool_only(pool);
- if (!page)
+ if (IS_ERR(page))
break;
list_add(&page->lru, &pages);
freed += (1 << order);
diff --git a/drivers/staging/android/ion/msm/msm_ion_of.c b/drivers/staging/android/ion/msm/msm_ion_of.c
index 04a998c..c9177825 100644
--- a/drivers/staging/android/ion/msm/msm_ion_of.c
+++ b/drivers/staging/android/ion/msm/msm_ion_of.c
@@ -12,7 +12,6 @@
#include <linux/of_address.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>
-#include <linux/module.h>
#include "../ion.h"
#define ION_COMPAT_STR "qcom,msm-ion"
@@ -64,6 +63,10 @@
{
.id = ION_AUDIO_HEAP_ID,
.name = ION_AUDIO_HEAP_NAME,
+ },
+ {
+ .id = ION_SECURE_CARVEOUT_HEAP_ID,
+ .name = ION_SECURE_CARVEOUT_HEAP_NAME,
}
};
#endif
@@ -79,6 +82,7 @@
MAKE_HEAP_TYPE_MAPPING(SYSTEM),
MAKE_HEAP_TYPE_MAPPING(SYSTEM_CONTIG),
MAKE_HEAP_TYPE_MAPPING(CARVEOUT),
+ MAKE_HEAP_TYPE_MAPPING(SECURE_CARVEOUT),
MAKE_HEAP_TYPE_MAPPING(CHUNK),
MAKE_HEAP_TYPE_MAPPING(DMA),
MAKE_HEAP_TYPE_MAPPING(SECURE_DMA),
@@ -345,19 +349,6 @@
return err;
}
-static int msm_ion_remove(struct platform_device *pdev)
-{
- struct ion_device *idev = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < num_heaps; i++)
- ion_heap_destroy(heaps[i]);
-
- ion_device_destroy(idev);
- kfree(heaps);
- return 0;
-}
-
static const struct of_device_id msm_ion_match_table[] = {
{.compatible = ION_COMPAT_STR},
{},
@@ -365,7 +356,6 @@
static struct platform_driver msm_ion_driver = {
.probe = msm_ion_probe,
- .remove = msm_ion_remove,
.driver = {
.name = "ion-msm",
.of_match_table = msm_ion_match_table,
@@ -377,9 +367,4 @@
return platform_driver_register(&msm_ion_driver);
}
-static void __exit msm_ion_exit(void)
-{
- platform_driver_unregister(&msm_ion_driver);
-}
subsys_initcall(msm_ion_init);
-module_exit(msm_ion_exit);
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
index 2357ebb..3f47ce2 100644
--- a/drivers/staging/android/uapi/msm_ion.h
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -21,6 +21,7 @@
ION_HEAP_TYPE_SECURE_DMA = ION_HEAP_TYPE_MSM_START,
ION_HEAP_TYPE_SYSTEM_SECURE,
ION_HEAP_TYPE_HYP_CMA,
+ ION_HEAP_TYPE_SECURE_CARVEOUT,
};
/**
@@ -48,6 +49,7 @@
* Newly added heap ids have to be #define(d) since all API changes must
* include a new #define.
*/
+#define ION_SECURE_CARVEOUT_HEAP_ID 14
#define ION_QSECOM_TA_HEAP_ID 19
#define ION_AUDIO_HEAP_ID 28
#define ION_CAMERA_HEAP_ID 20
@@ -66,6 +68,8 @@
#define ION_FLAG_CP_SEC_DISPLAY ION_BIT(25)
#define ION_FLAG_CP_APP ION_BIT(26)
#define ION_FLAG_CP_CAMERA_PREVIEW ION_BIT(27)
+/* ION_FLAG_ALLOW_NON_CONTIG uses ION_BIT(28) */
+#define ION_FLAG_CP_CDSP ION_BIT(29)
#define ION_FLAG_CP_SPSS_HLOS_SHARED ION_BIT(30)
#define ION_FLAGS_CP_MASK 0x7FFF0000
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index bb6e36a..4975e04 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -93,18 +93,18 @@
#define GSI_TRB_ADDR_BIT_53_MASK (1 << 21)
#define GSI_TRB_ADDR_BIT_55_MASK (1 << 23)
-#define GSI_GENERAL_CFG_REG (QSCRATCH_REG_OFFSET + 0xFC)
+#define GSI_GENERAL_CFG_REG(offset) (QSCRATCH_REG_OFFSET + offset)
#define GSI_RESTART_DBL_PNTR_MASK BIT(20)
#define GSI_CLK_EN_MASK BIT(12)
#define BLOCK_GSI_WR_GO_MASK BIT(1)
#define GSI_EN_MASK BIT(0)
-#define GSI_DBL_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
-#define GSI_DBL_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
-#define GSI_RING_BASE_ADDR_L(n) ((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
-#define GSI_RING_BASE_ADDR_H(n) ((QSCRATCH_REG_OFFSET + 0x144) + (n*4))
+#define GSI_DBL_ADDR_L(offset, n) ((QSCRATCH_REG_OFFSET + offset) + (n*4))
+#define GSI_DBL_ADDR_H(offset, n) ((QSCRATCH_REG_OFFSET + offset) + (n*4))
+#define GSI_RING_BASE_ADDR_L(offset, n) ((QSCRATCH_REG_OFFSET + offset) + (n*4))
+#define GSI_RING_BASE_ADDR_H(offset, n) ((QSCRATCH_REG_OFFSET + offset) + (n*4))
-#define GSI_IF_STS (QSCRATCH_REG_OFFSET + 0x1A4)
+#define GSI_IF_STS(offset) (QSCRATCH_REG_OFFSET + offset)
#define GSI_WR_CTRL_STATE_MASK BIT(15)
#define DWC3_GEVNTCOUNT_EVNTINTRPTMASK (1 << 31)
@@ -112,6 +112,16 @@
#define DWC3_GEVNTADRHI_EVNTADRHI_GSI_IDX(n) (n << 16)
#define DWC3_GEVENT_TYPE_GSI 0x3
+enum usb_gsi_reg {
+ GENERAL_CFG_REG,
+ DBL_ADDR_L,
+ DBL_ADDR_H,
+ RING_BASE_ADDR_L,
+ RING_BASE_ADDR_H,
+ IF_STS,
+ GSI_REG_MAX,
+};
+
struct dwc3_msm_req_complete {
struct list_head list_item;
struct usb_request *req;
@@ -262,6 +272,8 @@
struct mutex suspend_resume_mutex;
enum usb_device_speed override_usb_speed;
+ u32 *gsi_reg;
+ int gsi_reg_offset_cnt;
};
#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
@@ -926,8 +938,9 @@
struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
int n = ep->ep_intr_num - 1;
- dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
- dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
+ dwc3_msm_write_reg(mdwc->base,
+ GSI_RING_BASE_ADDR_L(mdwc->gsi_reg[RING_BASE_ADDR_L], (n)),
+ dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
if (request->mapped_db_reg_phs_addr_lsb)
dma_unmap_resource(dwc->sysdev,
@@ -944,12 +957,16 @@
ep->name, request->db_reg_phs_addr_lsb,
(unsigned long long)request->mapped_db_reg_phs_addr_lsb);
- dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n),
- (u32)request->mapped_db_reg_phs_addr_lsb);
+ dwc3_msm_write_reg(mdwc->base,
+ GSI_DBL_ADDR_L(mdwc->gsi_reg[DBL_ADDR_L], (n)),
+ (u32)request->mapped_db_reg_phs_addr_lsb);
dev_dbg(mdwc->dev, "Ring Base Addr %d: %x (LSB)\n", n,
- dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
+ dwc3_msm_read_reg(mdwc->base,
+ GSI_RING_BASE_ADDR_L(mdwc->gsi_reg[RING_BASE_ADDR_L],
+ (n))));
dev_dbg(mdwc->dev, "GSI DB Addr %d: %x (LSB)\n", n,
- dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
+ dwc3_msm_read_reg(mdwc->base,
+ GSI_DBL_ADDR_L(mdwc->gsi_reg[DBL_ADDR_L], (n))));
}
/**
@@ -1282,14 +1299,18 @@
struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
dwc3_msm_write_reg_field(mdwc->base,
- GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
+ GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]),
+ GSI_CLK_EN_MASK, 1);
dwc3_msm_write_reg_field(mdwc->base,
- GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
+ GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]),
+ GSI_RESTART_DBL_PNTR_MASK, 1);
dwc3_msm_write_reg_field(mdwc->base,
- GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
+ GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]),
+ GSI_RESTART_DBL_PNTR_MASK, 0);
dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
dwc3_msm_write_reg_field(mdwc->base,
- GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
+ GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]),
+ GSI_EN_MASK, 1);
}
/**
@@ -1308,7 +1329,8 @@
struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
dwc3_msm_write_reg_field(mdwc->base,
- GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
+ GSI_GENERAL_CFG_REG(mdwc->gsi_reg[GENERAL_CFG_REG]),
+ BLOCK_GSI_WR_GO_MASK, block_db);
}
/**
@@ -1325,7 +1347,7 @@
struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
while (dwc3_msm_read_reg_field(mdwc->base,
- GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
+ GSI_IF_STS(mdwc->gsi_reg[IF_STS]), GSI_WR_CTRL_STATE_MASK)) {
if (!timeout--) {
dev_err(mdwc->dev,
"Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
@@ -3282,7 +3304,7 @@
struct dwc3 *dwc;
struct resource *res;
bool host_mode;
- int ret = 0, i;
+ int ret = 0, size = 0, i;
u32 val;
unsigned long irq_type;
@@ -3435,6 +3457,29 @@
ret = of_property_read_u32(node, "qcom,num-gsi-evt-buffs",
&mdwc->num_gsi_event_buffers);
+ if (mdwc->num_gsi_event_buffers) {
+ of_get_property(node, "qcom,gsi-reg-offset", &size);
+ if (size) {
+ mdwc->gsi_reg = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!mdwc->gsi_reg)
+ return -ENOMEM;
+
+ mdwc->gsi_reg_offset_cnt =
+ (size / sizeof(*mdwc->gsi_reg));
+ if (mdwc->gsi_reg_offset_cnt != GSI_REG_MAX) {
+ dev_err(dev, "invalid reg offset count\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32_array(dev->of_node,
+ "qcom,gsi-reg-offset", mdwc->gsi_reg,
+ mdwc->gsi_reg_offset_cnt);
+ } else {
+ dev_err(dev, "err provide qcom,gsi-reg-offset\n");
+ return -EINVAL;
+ }
+ }
+
mdwc->use_pdc_interrupts = of_property_read_bool(node,
"qcom,use-pdc-interrupts");
dwc3_set_notifier(&dwc3_msm_notify_event);
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index 99790f9..4d7101c 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -39,6 +39,7 @@
#define MSM_BUS_FAB_CAMNOC_VIRT 6154
#define MSM_BUS_FAB_COMP_NOC 6155
#define MSM_BUS_FAB_GEM_NOC 6156
+#define MSM_BUS_FAB_NPU_NOC 6157
#define MSM_BUS_FAB_MC_VIRT_DISPLAY 26000
#define MSM_BUS_FAB_MEM_NOC_DISPLAY 26001
@@ -89,8 +90,10 @@
#define MSM_BUS_BCM_CO1 7041
#define MSM_BUS_BCM_CO2 7042
+#define MSM_BUS_RSC_FIRST 8000
#define MSM_BUS_RSC_APPS 8000
#define MSM_BUS_RSC_DISP 8001
+#define MSM_BUS_RSC_LAST 8001
#define MSM_BUS_BCM_MC0_DISPLAY 27000
#define MSM_BUS_BCM_SH0_DISPLAY 27001
@@ -271,6 +274,11 @@
#define MSM_BUS_MASTER_PCIE_3 167
#define MSM_BUS_MASTER_LPASS_ANOC 168
#define MSM_BUS_MASTER_USB2 169
+#define MSM_BUS_MASTER_CAMNOC_HF 170
+#define MSM_BUS_MASTER_CAMNOC_ICP 171
+#define MSM_BUS_MASTER_NPU_SYS 172
+#define MSM_BUS_MASTER_NPU_CDP 173
+#define MSM_BUS_MASTER_NPU_NOC_CFG 174
#define MSM_BUS_MASTER_LLCC_DISPLAY 20000
#define MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY 20001
@@ -644,6 +652,21 @@
#define MSM_BUS_SLAVE_DC_NOC_GEMNOC 803
#define MSM_BUS_SLAVE_MEM_NOC_PCIE_SNOC 804
#define MSM_BUS_SLAVE_USB2 805
+#define MSM_BUS_SLAVE_ANOC_PCIE_GEM_NOC_1 806
+#define MSM_BUS_SLAVE_AHB2PHY_NORTH 807
+#define MSM_BUS_SLAVE_CX_RDPM 808
+#define MSM_BUS_SLAVE_IPC_ROUTER_CFG 809
+#define MSM_BUS_SLAVE_SERVICE_GEM_NOC_2 810
+#define MSM_BUS_SLAVE_NPU_CAL_DP0 811
+#define MSM_BUS_SLAVE_NPU_CAL_DP1 812
+#define MSM_BUS_SLAVE_NPU_CP 813
+#define MSM_BUS_SLAVE_NPU_INT_DMA_BWMON_CFG 814
+#define MSM_BUS_SLAVE_NPU_DPM 815
+#define MSM_BUS_SLAVE_ISENSE_CFG 816
+#define MSM_BUS_SLAVE_NPU_LLM_CFG 817
+#define MSM_BUS_SLAVE_NPU_TCM 818
+#define MSM_BUS_SLAVE_NPU_COMPUTE_NOC 819
+#define MSM_BUS_SLAVE_SERVICE_NPU_NOC 820
#define MSM_BUS_SLAVE_EBI_CH0_DISPLAY 20512
#define MSM_BUS_SLAVE_LLCC_DISPLAY 20513
diff --git a/include/dt-bindings/msm/msm-bus-rule-ops.h b/include/dt-bindings/msm/msm-bus-rule-ops.h
new file mode 100644
index 0000000..12e7982
--- /dev/null
+++ b/include/dt-bindings/msm/msm-bus-rule-ops.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MSM_BUS_RULE_OPS_H
+#define __MSM_BUS_RULE_OPS_H
+
+#define FLD_IB 0
+#define FLD_AB 1
+#define FLD_CLK 2
+
+#define OP_LE 0
+#define OP_LT 1
+#define OP_GE 2
+#define OP_GT 3
+#define OP_NOOP 4
+
+#define RULE_STATE_NOT_APPLIED 0
+#define RULE_STATE_APPLIED 1
+
+#define THROTTLE_ON 0
+#define THROTTLE_OFF 1
+#define THROTTLE_REG 2
+
+
+#endif
diff --git a/include/linux/coresight-stm.h b/include/linux/coresight-stm.h
index a4f5d06..8221606 100644
--- a/include/linux/coresight-stm.h
+++ b/include/linux/coresight-stm.h
@@ -84,6 +84,8 @@
spinlock_t spinlock;
struct channel_space chs;
bool enable;
+ struct channel_space debug_status_chs;
+ bool master_enable;
DECLARE_BITMAP(entities, OST_ENTITY_MAX);
struct stm_data stm;
local_t mode;
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 86612c7..e2e4208 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -290,6 +290,8 @@
extern int of_get_coresight_csr_name(struct device_node *node,
const char **csr_name);
+extern struct coresight_cti_data *of_get_coresight_cti_data(
+ struct device *dev, struct device_node *node);
#else
static inline int of_coresight_get_cpu(const struct device_node *node)
{ return 0; }
@@ -297,6 +299,8 @@
struct device *dev, const struct device_node *node) { return NULL; }
static inline int of_get_coresight_csr_name(struct device_node *node,
const char **csr_name){ return -EINVAL; }
+static inline struct coresight_cti_data *of_get_coresight_cti_data(
+ struct device *dev, struct device_node *node) { return NULL; }
#endif
#endif
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 147bdec..07f2d16 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -54,6 +54,7 @@
* cpu_present_mask - has bit 'cpu' set iff cpu is populated
* cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler
* cpu_active_mask - has bit 'cpu' set iff cpu available to migration
+ * cpu_isolated_mask - has bit 'cpu' set iff cpu is isolated
*
* If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
*
@@ -90,29 +91,35 @@
extern struct cpumask __cpu_online_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
+extern struct cpumask __cpu_isolated_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)
#define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask)
+#define cpu_isolated_mask ((const struct cpumask *)&__cpu_isolated_mask)
#if NR_CPUS > 1
#define num_online_cpus() cpumask_weight(cpu_online_mask)
#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
#define num_present_cpus() cpumask_weight(cpu_present_mask)
#define num_active_cpus() cpumask_weight(cpu_active_mask)
+#define num_isolated_cpus() cpumask_weight(cpu_isolated_mask)
#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask)
+#define cpu_isolated(cpu) cpumask_test_cpu((cpu), cpu_isolated_mask)
#else
#define num_online_cpus() 1U
#define num_possible_cpus() 1U
#define num_present_cpus() 1U
#define num_active_cpus() 1U
+#define num_isolated_cpus() 0U
#define cpu_online(cpu) ((cpu) == 0)
#define cpu_possible(cpu) ((cpu) == 0)
#define cpu_present(cpu) ((cpu) == 0)
#define cpu_active(cpu) ((cpu) == 0)
+#define cpu_isolated(cpu) ((cpu) != 0)
#endif
static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
@@ -777,6 +784,7 @@
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
+#define for_each_isolated_cpu(cpu) for_each_cpu((cpu), cpu_isolated_mask)
/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
@@ -824,6 +832,15 @@
cpumask_clear_cpu(cpu, &__cpu_active_mask);
}
+static inline void
+set_cpu_isolated(unsigned int cpu, bool isolated)
+{
+ if (isolated)
+ cpumask_set_cpu(cpu, &__cpu_isolated_mask);
+ else
+ cpumask_clear_cpu(cpu, &__cpu_isolated_mask);
+}
+
/**
* to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 3ba898b..23cae96 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -100,6 +100,8 @@
#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
__IOMMU_DOMAIN_DMA_API)
+
+#define IOMMU_DOMAIN_NAME_LEN 32
struct iommu_domain {
unsigned type;
const struct iommu_ops *ops;
@@ -109,6 +111,7 @@
struct iommu_domain_geometry geometry;
void *iova_cookie;
bool is_debug_domain;
+ char name[IOMMU_DOMAIN_NAME_LEN];
};
enum iommu_cap {
@@ -129,6 +132,11 @@
* DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned contraints.
* The caller can invoke iommu_domain_get_attr to check if the underlying
* iommu implementation supports these constraints.
+ *
+ * DOMAIN_ATTR_NO_CFRE
+ * Some bus implementations may enter a bad state if iommu reports an error
+ * on context fault. As context faults are not always fatal, this must be
+ * avoided.
*/
enum iommu_attr {
@@ -159,6 +167,7 @@
DOMAIN_ATTR_BITMAP_IOVA_ALLOCATOR,
DOMAIN_ATTR_QCOM_MMU500_ERRATA_MIN_IOVA_ALIGN,
DOMAIN_ATTR_USE_LLC_NWA,
+ DOMAIN_ATTR_NO_CFRE,
DOMAIN_ATTR_MAX,
};
@@ -362,6 +371,9 @@
size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
unsigned long iova, size_t size);
+extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents,
+ int prot);
extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg,unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
@@ -443,13 +455,6 @@
domain->ops->iotlb_sync(domain);
}
-static inline size_t iommu_map_sg(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot)
-{
- return domain->ops->map_sg(domain, iova, sg, nents, prot);
-}
-
extern void iommu_trigger_fault(struct iommu_domain *domain,
unsigned long flags);
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 928442d..3acb4f5 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -74,6 +74,7 @@
struct rb_node *cached32_node; /* Save last 32-bit alloced node */
unsigned long granule; /* pfn granularity for this domain */
unsigned long start_pfn; /* Lower limit for this domain */
+ unsigned long end_pfn; /* Upper limit for this domain */
unsigned long dma_32bit_pfn;
struct iova anchor; /* rbtree lookup anchor */
struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
diff --git a/include/linux/msm-bus.h b/include/linux/msm-bus.h
index 9af4867..519682d 100644
--- a/include/linux/msm-bus.h
+++ b/include/linux/msm-bus.h
@@ -211,7 +211,6 @@
struct platform_device *pdev, struct device_node *of_node);
struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev);
struct msm_bus_scale_pdata *msm_bus_cl_get_pdata_from_dev(struct device *dev);
-void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata);
#else
static inline struct msm_bus_scale_pdata
*msm_bus_cl_get_pdata(struct platform_device *pdev)
@@ -224,11 +223,11 @@
{
return NULL;
}
+#endif
static inline void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata)
{
}
-#endif
#ifdef CONFIG_DEBUG_BUS_VOTER
int msm_bus_floor_vote_context(const char *name, u64 floor_hz,
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 5025cba..1e04b07 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -46,6 +46,7 @@
POWER_SUPPLY_CHARGE_TYPE_NONE,
POWER_SUPPLY_CHARGE_TYPE_TRICKLE,
POWER_SUPPLY_CHARGE_TYPE_FAST,
+ POWER_SUPPLY_CHARGE_TYPE_TAPER,
};
enum {
@@ -58,6 +59,9 @@
POWER_SUPPLY_HEALTH_COLD,
POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE,
+ POWER_SUPPLY_HEALTH_WARM,
+ POWER_SUPPLY_HEALTH_COOL,
+ POWER_SUPPLY_HEALTH_HOT,
};
enum {
@@ -85,6 +89,62 @@
POWER_SUPPLY_SCOPE_DEVICE,
};
+enum {
+ POWER_SUPPLY_DP_DM_UNKNOWN = 0,
+ POWER_SUPPLY_DP_DM_PREPARE = 1,
+ POWER_SUPPLY_DP_DM_UNPREPARE = 2,
+ POWER_SUPPLY_DP_DM_CONFIRMED_HVDCP3 = 3,
+ POWER_SUPPLY_DP_DM_DP_PULSE = 4,
+ POWER_SUPPLY_DP_DM_DM_PULSE = 5,
+ POWER_SUPPLY_DP_DM_DP0P6_DMF = 6,
+ POWER_SUPPLY_DP_DM_DP0P6_DM3P3 = 7,
+ POWER_SUPPLY_DP_DM_DPF_DMF = 8,
+ POWER_SUPPLY_DP_DM_DPR_DMR = 9,
+ POWER_SUPPLY_DP_DM_HVDCP3_SUPPORTED = 10,
+ POWER_SUPPLY_DP_DM_ICL_DOWN = 11,
+ POWER_SUPPLY_DP_DM_ICL_UP = 12,
+ POWER_SUPPLY_DP_DM_FORCE_5V = 13,
+ POWER_SUPPLY_DP_DM_FORCE_9V = 14,
+ POWER_SUPPLY_DP_DM_FORCE_12V = 15,
+};
+
+enum {
+ POWER_SUPPLY_PL_NONE,
+ POWER_SUPPLY_PL_USBIN_USBIN,
+ POWER_SUPPLY_PL_USBIN_USBIN_EXT,
+ POWER_SUPPLY_PL_USBMID_USBMID,
+};
+
+enum {
+ POWER_SUPPLY_CHARGER_SEC_NONE = 0,
+ POWER_SUPPLY_CHARGER_SEC_CP,
+ POWER_SUPPLY_CHARGER_SEC_PL,
+ POWER_SUPPLY_CHARGER_SEC_CP_PL,
+};
+
+enum {
+ POWER_SUPPLY_CP_NONE = 0,
+ POWER_SUPPLY_CP_HVDCP3,
+ POWER_SUPPLY_CP_PPS,
+ POWER_SUPPLY_CP_WIRELESS,
+};
+
+enum {
+ POWER_SUPPLY_CONNECTOR_TYPEC,
+ POWER_SUPPLY_CONNECTOR_MICRO_USB,
+};
+
+enum {
+ POWER_SUPPLY_PL_STACKED_BATFET,
+ POWER_SUPPLY_PL_NON_STACKED_BATFET,
+};
+
+enum {
+ POWER_SUPPLY_PD_INACTIVE = 0,
+ POWER_SUPPLY_PD_ACTIVE,
+ POWER_SUPPLY_PD_PPS_ACTIVE,
+};
+
enum power_supply_property {
/* Properties of type `int' */
POWER_SUPPLY_PROP_STATUS = 0,
@@ -155,12 +215,112 @@
POWER_SUPPLY_PROP_USB_HC,
POWER_SUPPLY_PROP_USB_OTG,
POWER_SUPPLY_PROP_CHARGE_ENABLED,
+ POWER_SUPPLY_PROP_SET_SHIP_MODE,
+ POWER_SUPPLY_PROP_REAL_TYPE,
+ POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+ POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
+ POWER_SUPPLY_PROP_CAPACITY_RAW,
+ POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_STEP_CHARGING_STEP,
+ POWER_SUPPLY_PROP_PIN_ENABLED,
+ POWER_SUPPLY_PROP_INPUT_SUSPEND,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_TRIM,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED,
+ POWER_SUPPLY_PROP_VCHG_LOOP_DBC_BYPASS,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW,
+ POWER_SUPPLY_PROP_HI_POWER,
+ POWER_SUPPLY_PROP_LOW_POWER,
+ POWER_SUPPLY_PROP_COOL_TEMP,
+ POWER_SUPPLY_PROP_WARM_TEMP,
+ POWER_SUPPLY_PROP_COLD_TEMP,
+ POWER_SUPPLY_PROP_HOT_TEMP,
+ POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+ POWER_SUPPLY_PROP_RESISTANCE,
+ POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE,
+ POWER_SUPPLY_PROP_RESISTANCE_ID, /* in Ohms */
+ POWER_SUPPLY_PROP_RESISTANCE_NOW,
+ POWER_SUPPLY_PROP_FLASH_CURRENT_MAX,
+ POWER_SUPPLY_PROP_UPDATE_NOW,
+ POWER_SUPPLY_PROP_ESR_COUNT,
+ POWER_SUPPLY_PROP_BUCK_FREQ,
+ POWER_SUPPLY_PROP_BOOST_CURRENT,
+ POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE,
+ POWER_SUPPLY_PROP_CHARGE_DONE,
+ POWER_SUPPLY_PROP_FLASH_ACTIVE,
+ POWER_SUPPLY_PROP_FLASH_TRIGGER,
+ POWER_SUPPLY_PROP_FORCE_TLIM,
+ POWER_SUPPLY_PROP_DP_DM,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE,
+ POWER_SUPPLY_PROP_CURRENT_QNOVO,
+ POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
+ POWER_SUPPLY_PROP_RERUN_AICL,
+ POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+ POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED,
+ POWER_SUPPLY_PROP_RESTRICTED_CHARGING,
+ POWER_SUPPLY_PROP_CURRENT_CAPABILITY,
+ POWER_SUPPLY_PROP_TYPEC_MODE,
+ POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION, /* 0: N/C, 1: CC1, 2: CC2 */
+ POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
+ POWER_SUPPLY_PROP_TYPEC_SRC_RP,
+ POWER_SUPPLY_PROP_PD_ALLOWED,
+ POWER_SUPPLY_PROP_PD_ACTIVE,
+ POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
+ POWER_SUPPLY_PROP_PD_CURRENT_MAX,
+ POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED,
+ POWER_SUPPLY_PROP_CHARGER_TEMP,
+ POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+ POWER_SUPPLY_PROP_PARALLEL_DISABLE,
+ POWER_SUPPLY_PROP_PE_START,
+ POWER_SUPPLY_PROP_SOC_REPORTING_READY,
+ POWER_SUPPLY_PROP_DEBUG_BATTERY,
+ POWER_SUPPLY_PROP_FCC_DELTA,
+ POWER_SUPPLY_PROP_ICL_REDUCTION,
+ POWER_SUPPLY_PROP_PARALLEL_MODE,
+ POWER_SUPPLY_PROP_DIE_HEALTH,
+ POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
+ POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
+ POWER_SUPPLY_PROP_HW_CURRENT_MAX,
+ POWER_SUPPLY_PROP_PR_SWAP,
+ POWER_SUPPLY_PROP_CC_STEP,
+ POWER_SUPPLY_PROP_CC_STEP_SEL,
+ POWER_SUPPLY_PROP_SW_JEITA_ENABLED,
+ POWER_SUPPLY_PROP_PD_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_SDP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONNECTOR_TYPE,
+ POWER_SUPPLY_PROP_PARALLEL_BATFET_MODE,
+ POWER_SUPPLY_PROP_PARALLEL_FCC_MAX,
+ POWER_SUPPLY_PROP_MIN_ICL,
+ POWER_SUPPLY_PROP_MOISTURE_DETECTED,
+ POWER_SUPPLY_PROP_BATT_PROFILE_VERSION,
+ POWER_SUPPLY_PROP_BATT_FULL_CURRENT,
+ POWER_SUPPLY_PROP_RECHARGE_SOC,
+ POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED,
+ POWER_SUPPLY_PROP_SMB_EN_MODE,
+ POWER_SUPPLY_PROP_SMB_EN_REASON,
+ POWER_SUPPLY_PROP_ESR_ACTUAL,
+ POWER_SUPPLY_PROP_ESR_NOMINAL,
+ POWER_SUPPLY_PROP_SOH,
+ POWER_SUPPLY_PROP_CLEAR_SOH,
+ POWER_SUPPLY_PROP_FORCE_RECHARGE,
+ POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE,
+ POWER_SUPPLY_PROP_TOGGLE_STAT,
+ POWER_SUPPLY_PROP_MAIN_FCC_MAX,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
+ POWER_SUPPLY_PROP_BATTERY_TYPE,
+ POWER_SUPPLY_PROP_CYCLE_COUNTS,
};
enum power_supply_type {
@@ -176,6 +336,16 @@
POWER_SUPPLY_TYPE_USB_PD, /* Power Delivery Port */
POWER_SUPPLY_TYPE_USB_PD_DRP, /* PD Dual Role Port */
POWER_SUPPLY_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */
+ POWER_SUPPLY_TYPE_USB_HVDCP, /* High Voltage DCP */
+ POWER_SUPPLY_TYPE_USB_HVDCP_3, /* Efficient High Voltage DCP */
+ POWER_SUPPLY_TYPE_WIRELESS, /* Accessory Charger Adapters */
+ POWER_SUPPLY_TYPE_USB_FLOAT, /* Floating charger */
+ POWER_SUPPLY_TYPE_BMS, /* Battery Monitor System */
+ POWER_SUPPLY_TYPE_PARALLEL, /* Parallel Path */
+ POWER_SUPPLY_TYPE_MAIN, /* Main Path */
+ POWER_SUPPLY_TYPE_WIPOWER, /* Wipower */
+ POWER_SUPPLY_TYPE_UFP, /* Type-C UFP */
+ POWER_SUPPLY_TYPE_DFP, /* Type-C DFP */
};
enum power_supply_usb_type {
@@ -191,6 +361,37 @@
POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */
};
+/* Indicates USB Type-C CC connection status */
+enum power_supply_typec_mode {
+ POWER_SUPPLY_TYPEC_NONE,
+
+ /* Acting as source */
+ POWER_SUPPLY_TYPEC_SINK, /* Rd only */
+ POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE, /* Rd/Ra */
+ POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY,/* Rd/Rd */
+ POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER, /* Ra/Ra */
+ POWER_SUPPLY_TYPEC_POWERED_CABLE_ONLY, /* Ra only */
+
+ /* Acting as sink */
+ POWER_SUPPLY_TYPEC_SOURCE_DEFAULT, /* Rp default */
+ POWER_SUPPLY_TYPEC_SOURCE_MEDIUM, /* Rp 1.5A */
+ POWER_SUPPLY_TYPEC_SOURCE_HIGH, /* Rp 3A */
+ POWER_SUPPLY_TYPEC_NON_COMPLIANT,
+};
+
+enum power_supply_typec_src_rp {
+ POWER_SUPPLY_TYPEC_SRC_RP_STD,
+ POWER_SUPPLY_TYPEC_SRC_RP_1P5A,
+ POWER_SUPPLY_TYPEC_SRC_RP_3A
+};
+
+enum power_supply_typec_power_role {
+ POWER_SUPPLY_TYPEC_PR_NONE, /* CC lines in high-Z */
+ POWER_SUPPLY_TYPEC_PR_DUAL,
+ POWER_SUPPLY_TYPEC_PR_SINK,
+ POWER_SUPPLY_TYPEC_PR_SOURCE,
+};
+
enum power_supply_notifier_events {
PSY_EVENT_PROP_CHANGED,
};
diff --git a/include/linux/show_mem_notifier.h b/include/linux/show_mem_notifier.h
new file mode 100644
index 0000000..17d32fe
--- /dev/null
+++ b/include/linux/show_mem_notifier.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/notifier.h>
+
+int show_mem_notifier_register(struct notifier_block *nb);
+
+int show_mem_notifier_unregister(struct notifier_block *nb);
+
+void show_mem_call_notifiers(void);
diff --git a/include/linux/soc/qcom/fsa4480-i2c.h b/include/linux/soc/qcom/fsa4480-i2c.h
new file mode 100644
index 0000000..dd69e6f
--- /dev/null
+++ b/include/linux/soc/qcom/fsa4480-i2c.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+#ifndef FSA4480_I2C_H
+#define FSA4480_I2C_H
+
+#include <linux/of.h>
+#include <linux/notifier.h>
+
+enum fsa_function {
+ FSA_MIC_GND_SWAP,
+ FSA_USBC_ORIENTATION_CC1,
+ FSA_USBC_ORIENTATION_CC2,
+ FSA_USBC_DISPLAYPORT_DISCONNECTED,
+ FSA_EVENT_MAX,
+};
+
+#ifdef CONFIG_QCOM_FSA4480_I2C
+int fsa4480_switch_event(struct device_node *node,
+ enum fsa_function event);
+int fsa4480_reg_notifier(struct notifier_block *nb,
+ struct device_node *node);
+int fsa4480_unreg_notifier(struct notifier_block *nb,
+ struct device_node *node);
+#else
+static inline int fsa4480_switch_event(struct device_node *node,
+ enum fsa_function event)
+{
+ return 0;
+}
+
+static inline int fsa4480_reg_notifier(struct notifier_block *nb,
+ struct device_node *node)
+{
+ return 0;
+}
+
+static inline int fsa4480_unreg_notifier(struct notifier_block *nb,
+ struct device_node *node)
+{
+ return 0;
+}
+#endif /* CONFIG_QCOM_FSA4480_I2C */
+
+#endif /* FSA4480_I2C_H */
+
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 47a3441..284df30 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -22,7 +22,7 @@
#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE
-enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+enum vm_event_item { PGPGIN, PGPGOUT, PGPGOUTCLEAN, PSWPIN, PSWPOUT,
FOR_ALL_ZONES(PGALLOC),
FOR_ALL_ZONES(ALLOCSTALL),
FOR_ALL_ZONES(PGSCAN_SKIP),
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index 89ae96f..296922f 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -32,6 +32,7 @@
VMID_CP_CAMERA_PREVIEW = 0x1D,
VMID_CP_SPSS_SP_SHARED = 0x22,
VMID_CP_SPSS_HLOS_SHARED = 0x24,
+ VMID_CP_CDSP = 0x2A,
VMID_LAST,
VMID_INVAL = -1
};
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index 72b4582..0db6f7f 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -12,8 +12,10 @@
#define _TRACE_IOMMU_H
#include <linux/tracepoint.h>
+#include <linux/iommu.h>
struct device;
+struct iommu_domain;
DECLARE_EVENT_CLASS(iommu_group_event,
@@ -85,47 +87,84 @@
TRACE_EVENT(map,
- TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
+ TP_PROTO(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot),
- TP_ARGS(iova, paddr, size),
+ TP_ARGS(domain, iova, paddr, size, prot),
TP_STRUCT__entry(
+ __string(name, domain->name)
__field(u64, iova)
__field(u64, paddr)
__field(size_t, size)
+ __field(int, prot)
),
TP_fast_assign(
+ __assign_str(name, domain->name);
__entry->iova = iova;
__entry->paddr = paddr;
__entry->size = size;
+ __entry->prot = prot;
),
- TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
- __entry->iova, __entry->paddr, __entry->size
+ TP_printk("IOMMU:%s iova=0x%016llx paddr=0x%016llx size=0x%zx prot=0x%x",
+ __get_str(name), __entry->iova, __entry->paddr,
+ __entry->size, __entry->prot
)
);
TRACE_EVENT(unmap,
- TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
+ TP_PROTO(struct iommu_domain *domain, unsigned long iova, size_t size,
+ size_t unmapped_size),
- TP_ARGS(iova, size, unmapped_size),
+ TP_ARGS(domain, iova, size, unmapped_size),
TP_STRUCT__entry(
+ __string(name, domain->name)
__field(u64, iova)
__field(size_t, size)
__field(size_t, unmapped_size)
),
TP_fast_assign(
+ __assign_str(name, domain->name);
__entry->iova = iova;
__entry->size = size;
__entry->unmapped_size = unmapped_size;
),
- TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
- __entry->iova, __entry->size, __entry->unmapped_size
+ TP_printk("IOMMU:%s iova=0x%016llx size=0x%zx unmapped_size=0x%zx",
+ __get_str(name), __entry->iova, __entry->size,
+ __entry->unmapped_size
+ )
+);
+
+TRACE_EVENT(map_sg,
+
+ TP_PROTO(struct iommu_domain *domain, unsigned long iova, size_t size,
+ int prot),
+
+ TP_ARGS(domain, iova, size, prot),
+
+ TP_STRUCT__entry(
+ __string(name, domain->name)
+ __field(u64, iova)
+ __field(size_t, size)
+ __field(int, prot)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, domain->name);
+ __entry->iova = iova;
+ __entry->size = size;
+ __entry->prot = prot;
+ ),
+
+ TP_printk("IOMMU:%s iova=0x%016llx size=0x%zx prot=0x%x",
+ __get_str(name), __entry->iova, __entry->size,
+ __entry->prot
)
);
@@ -161,6 +200,66 @@
TP_ARGS(dev, iova, flags)
);
+
+DECLARE_EVENT_CLASS(iommu_tlbi,
+
+ TP_PROTO(struct device *dev, u64 time),
+
+ TP_ARGS(dev, time),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, time)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(dev));
+ __entry->time = time;
+ ),
+
+ TP_printk("IOMMU:%s %lld us",
+ __get_str(device), __entry->time
+ )
+);
+
+DEFINE_EVENT(iommu_tlbi, tlbi_start,
+
+ TP_PROTO(struct device *dev, u64 time),
+
+ TP_ARGS(dev, time)
+);
+
+DEFINE_EVENT(iommu_tlbi, tlbi_end,
+
+ TP_PROTO(struct device *dev, u64 time),
+
+ TP_ARGS(dev, time)
+);
+
+DEFINE_EVENT(iommu_tlbi, tlbsync_timeout,
+
+ TP_PROTO(struct device *dev, u64 time),
+
+ TP_ARGS(dev, time)
+);
+
+TRACE_EVENT(smmu_init,
+
+ TP_PROTO(u64 time),
+
+ TP_ARGS(time),
+
+ TP_STRUCT__entry(
+ __field(u64, time)
+ ),
+
+ TP_fast_assign(
+ __entry->time = time;
+ ),
+
+ TP_printk("ARM SMMU init latency: %lld us", __entry->time)
+);
+
#endif /* _TRACE_IOMMU_H */
/* This part must be outside protection */
diff --git a/include/trace/events/trace_msm_bus.h b/include/trace/events/trace_msm_bus.h
new file mode 100644
index 0000000..e21917c
--- /dev/null
+++ b/include/trace/events/trace_msm_bus.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msm_bus
+
+#if !defined(_TRACE_MSM_BUS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MSM_BUS_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(bus_update_request,
+
+ TP_PROTO(int sec, int nsec, const char *name, int src, int dest,
+ unsigned long long ab, unsigned long long ib),
+
+ TP_ARGS(sec, nsec, name, src, dest, ab, ib),
+
+ TP_STRUCT__entry(
+ __field(int, sec)
+ __field(int, nsec)
+ __string(name, name)
+ __field(int, src)
+ __field(int, dest)
+ __field(u64, ab)
+ __field(u64, ib)
+ ),
+
+ TP_fast_assign(
+ __entry->sec = sec;
+ __entry->nsec = nsec;
+ __assign_str(name, name);
+ __entry->src = src;
+ __entry->dest = dest;
+ __entry->ab = ab;
+ __entry->ib = ib;
+ ),
+
+ TP_printk("time= %u.%09u name=%s src=%d dest=%d ab=%llu ib=%llu",
+ __entry->sec,
+ __entry->nsec,
+ __get_str(name),
+ __entry->src,
+ __entry->dest,
+ (unsigned long long)__entry->ab,
+ (unsigned long long)__entry->ib)
+);
+
+TRACE_EVENT(bus_update_request_end,
+
+ TP_PROTO(const char *name),
+
+ TP_ARGS(name),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ ),
+
+ TP_printk("client-name=%s", __get_str(name))
+);
+
+TRACE_EVENT(bus_bimc_config_limiter,
+
+ TP_PROTO(int mas_id, unsigned long long cur_lim_bw),
+
+ TP_ARGS(mas_id, cur_lim_bw),
+
+ TP_STRUCT__entry(
+ __field(int, mas_id)
+ __field(u64, cur_lim_bw)
+ ),
+
+ TP_fast_assign(
+ __entry->mas_id = mas_id;
+ __entry->cur_lim_bw = cur_lim_bw;
+ ),
+
+ TP_printk("Master=%d cur_lim_bw=%llu",
+ __entry->mas_id,
+ (unsigned long long)__entry->cur_lim_bw)
+);
+
+TRACE_EVENT(bus_avail_bw,
+
+ TP_PROTO(unsigned long long cur_bimc_bw, unsigned long long cur_mdp_bw),
+
+ TP_ARGS(cur_bimc_bw, cur_mdp_bw),
+
+ TP_STRUCT__entry(
+ __field(u64, cur_bimc_bw)
+ __field(u64, cur_mdp_bw)
+ ),
+
+ TP_fast_assign(
+ __entry->cur_bimc_bw = cur_bimc_bw;
+ __entry->cur_mdp_bw = cur_mdp_bw;
+ ),
+
+ TP_printk("cur_bimc_bw = %llu cur_mdp_bw = %llu",
+ (unsigned long long)__entry->cur_bimc_bw,
+ (unsigned long long)__entry->cur_mdp_bw)
+);
+
+TRACE_EVENT(bus_rules_matches,
+
+ TP_PROTO(int node_id, int rule_id, unsigned long long node_ab,
+ unsigned long long node_ib, unsigned long long node_clk),
+
+ TP_ARGS(node_id, rule_id, node_ab, node_ib, node_clk),
+
+ TP_STRUCT__entry(
+ __field(int, node_id)
+ __field(int, rule_id)
+ __field(u64, node_ab)
+ __field(u64, node_ib)
+ __field(u64, node_clk)
+ ),
+
+ TP_fast_assign(
+ __entry->node_id = node_id;
+ __entry->rule_id = rule_id;
+ __entry->node_ab = node_ab;
+ __entry->node_ib = node_ib;
+ __entry->node_clk = node_clk;
+ ),
+
+ TP_printk("Rule match node%d rule%d node-ab%llu:ib%llu:clk%llu",
+ __entry->node_id, __entry->rule_id,
+ (unsigned long long)__entry->node_ab,
+ (unsigned long long)__entry->node_ib,
+ (unsigned long long)__entry->node_clk)
+);
+
+TRACE_EVENT(bus_bke_params,
+
+ TP_PROTO(u32 gc, u32 gp, u32 thl, u32 thm, u32 thh),
+
+ TP_ARGS(gc, gp, thl, thm, thh),
+
+ TP_STRUCT__entry(
+ __field(u32, gc)
+ __field(u32, gp)
+ __field(u32, thl)
+ __field(u32, thm)
+ __field(u32, thh)
+ ),
+
+ TP_fast_assign(
+ __entry->gc = gc;
+ __entry->gp = gp;
+ __entry->thl = thl;
+ __entry->thm = thm;
+ __entry->thh = thh;
+ ),
+
+ TP_printk("BKE Params GC=0x%x GP=0x%x THL=0x%x THM=0x%x THH=0x%x",
+ __entry->gc, __entry->gp, __entry->thl, __entry->thm,
+ __entry->thh)
+);
+
+TRACE_EVENT(bus_client_status,
+
+ TP_PROTO(const char *name, int src, int dest,
+ unsigned long long ab, unsigned long long ib, int active_only),
+
+ TP_ARGS(name, src, dest, ab, ib, active_only),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(int, src)
+ __field(int, dest)
+ __field(u64, ab)
+ __field(u64, ib)
+ __field(int, active_only)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->src = src;
+ __entry->dest = dest;
+ __entry->ab = ab;
+ __entry->ib = ib;
+ __entry->active_only = active_only;
+ ),
+
+ TP_printk("name=%s src=%d dest=%d ab=%llu ib=%llu active_only=%d",
+ __get_str(name),
+ __entry->src,
+ __entry->dest,
+ (unsigned long long)__entry->ab,
+ (unsigned long long)__entry->ib,
+ __entry->active_only)
+);
+
+TRACE_EVENT(bus_agg_bw,
+
+ TP_PROTO(unsigned int node_id, int rpm_id, int ctx_set,
+ unsigned long long agg_ab),
+
+ TP_ARGS(node_id, rpm_id, ctx_set, agg_ab),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, node_id)
+ __field(int, rpm_id)
+ __field(int, ctx_set)
+ __field(u64, agg_ab)
+ ),
+
+ TP_fast_assign(
+ __entry->node_id = node_id;
+ __entry->rpm_id = rpm_id;
+ __entry->ctx_set = ctx_set;
+ __entry->agg_ab = agg_ab;
+ ),
+
+ TP_printk("node_id:%u rpm_id:%d rpm_ctx:%d agg_ab:%llu",
+ __entry->node_id,
+ __entry->rpm_id,
+ __entry->ctx_set,
+ (unsigned long long)__entry->agg_ab)
+);
+#endif
+#define TRACE_INCLUDE_FILE trace_msm_bus
+#include <trace/define_trace.h>
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 478fabd..2423306 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -67,7 +67,7 @@
*/
struct snd_compr_tstamp {
__u32 byte_offset;
- __u32 copied_total;
+ __u64 copied_total;
__u32 pcm_frames;
__u32 pcm_io_frames;
__u32 sampling_rate;
@@ -123,17 +123,62 @@
} __attribute__((packed, aligned(4)));
/**
+ * struct snd_compr_audio_info: compressed input audio information
+ * @frame_size: length of the encoded frame with valid data
+ * @reserved: reserved for future use
+ */
+struct snd_compr_audio_info {
+ __u32 frame_size;
+ __u32 reserved[15];
+} __attribute__((packed, aligned(4)));
+
+#define SND_COMPR_AUDIO_INFO
+
+#define SNDRV_COMPRESS_RENDER_MODE_AUDIO_MASTER 0
+#define SNDRV_COMPRESS_RENDER_MODE_STC_MASTER 1
+
+#define SNDRV_COMPRESS_CLK_REC_MODE_NONE 0
+#define SNDRV_COMPRESS_CLK_REC_MODE_AUTO 1
+
+/**
* enum sndrv_compress_encoder
* @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the
* end of the track
* @SNDRV_COMPRESS_ENCODER_DELAY: no of samples inserted by the encoder at the
* beginning of the track
+ * @SNDRV_COMPRESS_PATH_DELAY: dsp path delay in microseconds
+ * @SNDRV_COMPRESS_RENDER_MODE: dsp render mode (audio master or stc)
+ * @SNDRV_COMPRESS_CLK_REC_MODE: clock recovery mode (none or auto)
+ * @SNDRV_COMPRESS_RENDER_WINDOW: render window
+ * @SNDRV_COMPRESS_START_DELAY: start delay
+ * @SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK: enable dsp drift correction
+ * @SNDRV_COMPRESS_ADJUST_SESSION_CLOCK: set drift correction value
*/
enum sndrv_compress_encoder {
SNDRV_COMPRESS_ENCODER_PADDING = 1,
SNDRV_COMPRESS_ENCODER_DELAY = 2,
+ SNDRV_COMPRESS_MIN_BLK_SIZE = 3,
+ SNDRV_COMPRESS_MAX_BLK_SIZE = 4,
+ SNDRV_COMPRESS_PATH_DELAY = 5,
+ SNDRV_COMPRESS_RENDER_MODE = 6,
+ SNDRV_COMPRESS_CLK_REC_MODE = 7,
+ SNDRV_COMPRESS_RENDER_WINDOW = 8,
+ SNDRV_COMPRESS_START_DELAY = 9,
+ SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK = 10,
+ SNDRV_COMPRESS_ADJUST_SESSION_CLOCK = 11,
};
+#define SNDRV_COMPRESS_MIN_BLK_SIZE SNDRV_COMPRESS_MIN_BLK_SIZE
+#define SNDRV_COMPRESS_MAX_BLK_SIZE SNDRV_COMPRESS_MAX_BLK_SIZE
+#define SNDRV_COMPRESS_PATH_DELAY SNDRV_COMPRESS_PATH_DELAY
+#define SNDRV_COMPRESS_RENDER_MODE SNDRV_COMPRESS_RENDER_MODE
+#define SNDRV_COMPRESS_CLK_REC_MODE SNDRV_COMPRESS_CLK_REC_MODE
+#define SNDRV_COMPRESS_RENDER_WINDOW SNDRV_COMPRESS_RENDER_WINDOW
+#define SNDRV_COMPRESS_START_DELAY SNDRV_COMPRESS_START_DELAY
+#define SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK \
+ SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK
+#define SNDRV_COMPRESS_ADJUST_SESSION_CLOCK SNDRV_COMPRESS_ADJUST_SESSION_CLOCK
+
/**
* struct snd_compr_metadata - compressed stream metadata
* @key: key id
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index e9da492..4cf8c9f 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -69,6 +69,11 @@
#define Q6_DTS 0x00010D88
#define Q6_DTS_LBR 0x00010DBB
+/* Timestamp flag */
+/* Bit-0 - 1 : Enable Timestamp mode */
+/* Bit-0 - 0 : Disable Timestamp mode */
+#define COMPRESSED_TIMESTAMP_FLAG 0x0001
+
/* Codecs are listed linearly to allow for extensibility */
#define SND_AUDIOCODEC_PCM ((__u32) 0x00000001)
#define SND_AUDIOCODEC_MP3 ((__u32) 0x00000002)
@@ -95,7 +100,12 @@
#define SND_AUDIOCODEC_WMA_PRO ((__u32) 0x00000017)
#define SND_AUDIOCODEC_DTS ((__u32) 0x00000018)
#define SND_AUDIOCODEC_EAC3 ((__u32) 0x00000019)
-#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_EAC3
+#define SND_AUDIOCODEC_ALAC ((__u32) 0x00000020)
+#define SND_AUDIOCODEC_APE ((__u32) 0x00000021)
+#define SND_AUDIOCODEC_DSD ((__u32) 0x00000022)
+#define SND_AUDIOCODEC_APTX ((__u32) 0x00000023)
+#define SND_AUDIOCODEC_TRUEHD ((__u32) 0x00000024)
+#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_TRUEHD
/*
* Profile and modes are listed with bit masks. This allows for a
@@ -261,8 +271,15 @@
struct snd_enc_wma {
__u32 super_block_align; /* WMA Type-specific data */
+ __u32 bits_per_sample;
+ __u32 channelmask;
+ __u32 encodeopt;
+ __u32 encodeopt1;
+ __u32 encodeopt2;
+ __u32 avg_bit_rate;
};
+#define SND_ENC_WMA_EXTENTED_SUPPORT
/**
* struct snd_enc_vorbis
@@ -337,13 +354,66 @@
__s32 reserved[15]; /* Can be used for SND_AUDIOCODEC_BESPOKE */
} __attribute__((packed, aligned(4)));
+struct snd_dec_flac {
+ __u16 sample_size;
+ __u16 min_blk_size;
+ __u16 max_blk_size;
+ __u16 min_frame_size;
+ __u16 max_frame_size;
+} __attribute__((packed, aligned(4)));
+
+#define SND_DEC_FLAC_SUPPORTED
+
+struct snd_dec_vorbis {
+ __u32 bit_stream_fmt;
+};
+
+struct snd_dec_alac {
+ __u32 frame_length;
+ __u8 compatible_version;
+ __u8 bit_depth;
+ __u8 pb;
+ __u8 mb;
+ __u8 kb;
+ __u8 num_channels;
+ __u16 max_run;
+ __u32 max_frame_bytes;
+ __u32 avg_bit_rate;
+ __u32 sample_rate;
+ __u32 channel_layout_tag;
+};
+
+struct snd_dec_ape {
+ __u16 compatible_version;
+ __u16 compression_level;
+ __u32 format_flags;
+ __u32 blocks_per_frame;
+ __u32 final_frame_blocks;
+ __u32 total_frames;
+ __u16 bits_per_sample;
+ __u16 num_channels;
+ __u32 sample_rate;
+ __u32 seek_table_present;
+};
+
+struct snd_dec_aptx {
+ __u32 lap;
+ __u32 uap;
+ __u32 nap;
+};
+
union snd_codec_options {
struct snd_enc_wma wma;
struct snd_enc_vorbis vorbis;
struct snd_enc_real real;
struct snd_enc_flac flac;
struct snd_enc_generic generic;
-} __attribute__((packed, aligned(4)));
+ struct snd_dec_flac flac_dec;
+ struct snd_dec_vorbis vorbis_dec;
+ struct snd_dec_alac alac;
+ struct snd_dec_ape ape;
+ struct snd_dec_aptx aptx_dec;
+};
/** struct snd_codec_desc - description of codec capabilities
* @max_ch: Maximum number of audio channels
@@ -404,6 +474,7 @@
* @align: Block alignment in bytes of an audio sample.
* Only required for PCM or IEC formats.
* @options: encoder-specific settings
+ * @compr_passthr: compressed bitstream passthrough
* @reserved: reserved for future use
*/
@@ -420,7 +491,26 @@
__u32 format;
__u32 align;
union snd_codec_options options;
- __u32 reserved[3];
+ __u32 compr_passthr;
+ __u32 flags;
+ __u32 reserved[1];
} __attribute__((packed, aligned(4)));
+#define SND_CODEC_COMPRESS_PASSTHROUGH
+
+/** struct snd_codec_metadata
+ * @length: Length of the encoded buffer.
+ * @offset: Offset from the buffer address to the first byte of the first
+ * encoded frame. All encoded frames are consecutive starting
+ * from this offset.
+ * @timestamp: Session time in microseconds of the first sample in the buffer.
+ * @reserved: Reserved for future use.
+ */
+struct snd_codec_metadata {
+ __u32 length;
+ __u32 offset;
+ __u64 timestamp;
+ __u32 reserved[4];
+};
+
#endif
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5224704..ffb6553 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2231,6 +2231,9 @@
struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);
+struct cpumask __cpu_isolated_mask __read_mostly;
+EXPORT_SYMBOL(__cpu_isolated_mask);
+
void init_cpu_present(const struct cpumask *src)
{
cpumask_copy(&__cpu_present_mask, src);
@@ -2246,6 +2249,11 @@
cpumask_copy(&__cpu_online_mask, src);
}
+void init_cpu_isolated(const struct cpumask *src)
+{
+ cpumask_copy(&__cpu_isolated_mask, src);
+}
+
/*
* Activate the first processor.
*/
diff --git a/kernel/trace/ipc_logging_debug.c b/kernel/trace/ipc_logging_debug.c
index 6ef25e5..0a767a7 100644
--- a/kernel/trace/ipc_logging_debug.c
+++ b/kernel/trace/ipc_logging_debug.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2015, 2017-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/slab.h>
@@ -70,18 +70,19 @@
struct dentry *d = file->f_path.dentry;
char *buffer;
int bsize;
- int srcu_idx;
int r;
- r = debugfs_use_file_start(d, &srcu_idx);
- if (!r) {
- ilctxt = file->private_data;
- r = kref_get_unless_zero(&ilctxt->refcount) ? 0 : -EIO;
- }
- debugfs_use_file_finish(srcu_idx);
+ r = debugfs_file_get(d);
if (r)
return r;
+ ilctxt = file->private_data;
+ r = kref_get_unless_zero(&ilctxt->refcount) ? 0 : -EIO;
+ if (r) {
+ debugfs_file_put(d);
+ return r;
+ }
+
buffer = kmalloc(count, GFP_KERNEL);
if (!buffer) {
bsize = -ENOMEM;
@@ -102,6 +103,7 @@
done:
ipc_log_context_put(ilctxt);
+ debugfs_file_put(d);
return bsize;
}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2d03842..3be4975 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -283,6 +283,17 @@
If unsure, say N.
+config PAGE_OWNER_ENABLE_DEFAULT
+ bool "Enable page owner tracking by default"
+ depends on PAGE_OWNER
+ help
+ This keeps track of what call chain is the owner of a page, and may
+ help to find bare alloc_page(s) leaks. If you include this
+ feature in your build, it is enabled by default. You can pass
+ "page_owner=off" as a boot parameter to disable it. Eats
+ a fair amount of memory if enabled. See tools/vm/page_owner_sort.c
+ for user-space helper.
+
config DEBUG_FS
bool "Debug Filesystem"
help
diff --git a/lib/Makefile b/lib/Makefile
index ca3f7eb..3e2f6c3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -33,6 +33,7 @@
lib-y += kobject.o klist.o
obj-y += lockref.o
+KASAN_SANITIZE_find_bit.o := n
obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 9a7b8b0..2d50b70 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -54,6 +54,16 @@
If unsure, say N
+config PAGE_POISONING_ENABLE_DEFAULT
+ bool "Enable page poisoning by default?"
+ default n
+ depends on PAGE_POISONING
+ help
+ Enable page poisoning of free pages by default? This value
+ can be overridden by page_poison=off|on. This can be used
+ to avoid passing the kernel parameter and leave the page
+ poisoning feature enabled by default.
+
config PAGE_POISONING_NO_SANITY
depends on PAGE_POISONING
bool "Only poison, don't sanity check"
diff --git a/mm/Makefile b/mm/Makefile
index 26ef77a..7332f89 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -39,7 +39,7 @@
mm_init.o mmu_context.o percpu.o slab_common.o \
compaction.o vmacache.o \
interval_tree.o list_lru.o workingset.o \
- debug.o $(mmu-y)
+ debug.o $(mmu-y) showmem.o
obj-y += init-mm.o
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 97db0e8..c20dbd6 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -192,7 +192,7 @@
return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
-void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
+void free_bootmem_late(unsigned long physaddr, unsigned long size)
{
unsigned long cursor, end;
diff --git a/mm/filemap.c b/mm/filemap.c
index 59ebf34..8f75138 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -192,10 +192,12 @@
* invalidate any existing cleancache entries. We can't leave
* stale data around in the cleancache once our page is gone
*/
- if (PageUptodate(page) && PageMappedToDisk(page))
+ if (PageUptodate(page) && PageMappedToDisk(page)) {
+ count_vm_event(PGPGOUTCLEAN);
cleancache_put_page(page);
- else
+ } else {
cleancache_invalidate_page(mapping, page);
+ }
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(page_mapped(page), page);
diff --git a/mm/memory.c b/mm/memory.c
index c467102..93767ef 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3514,7 +3514,7 @@
}
static unsigned long fault_around_bytes __read_mostly =
- rounddown_pow_of_two(65536);
+ rounddown_pow_of_two(4096);
#ifdef CONFIG_DEBUG_FS
static int fault_around_bytes_get(void *data, u64 *val)
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 439af3b..2bd550e 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -81,7 +81,7 @@
* down, but we are still initializing the system. Pages are given directly
* to the page allocator, no bootmem metadata is updated because it is gone.
*/
-void __init free_bootmem_late(unsigned long addr, unsigned long size)
+void free_bootmem_late(unsigned long addr, unsigned long size)
{
unsigned long cursor, end;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7ae2649..a3b3a6a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1264,7 +1264,7 @@
local_irq_restore(flags);
}
-static void __init __free_pages_boot_core(struct page *page, unsigned int order)
+static void __free_pages_boot_core(struct page *page, unsigned int order)
{
unsigned int nr_pages = 1 << order;
struct page *p = page;
@@ -1338,7 +1338,7 @@
#endif
-void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
+void __free_pages_bootmem(struct page *page, unsigned long pfn,
unsigned int order)
{
if (early_page_uninitialised(pfn))
@@ -2450,14 +2450,14 @@
static __always_inline struct page *
__rmqueue(struct zone *zone, unsigned int order, int migratetype)
{
- struct page *page;
+ struct page *page = NULL;
retry:
- page = __rmqueue_smallest(zone, order, migratetype);
- if (unlikely(!page)) {
- if (migratetype == MIGRATE_MOVABLE)
- page = __rmqueue_cma_fallback(zone, order);
+ if (migratetype == MIGRATE_MOVABLE)
+ page = __rmqueue_cma_fallback(zone, order);
+ if (!page) {
+ page = __rmqueue_smallest(zone, order, migratetype);
if (!page && __rmqueue_fallback(zone, order, migratetype))
goto retry;
}
diff --git a/mm/page_io.c b/mm/page_io.c
index aafd19e..abc1466 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -63,8 +63,9 @@
* Also clear PG_reclaim to avoid rotate_reclaimable_page()
*/
set_page_dirty(page);
- pr_alert("Write-error on swap-device (%u:%u:%llu)\n",
- MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
+ pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
+ MAJOR(bio_dev(bio)),
+ MINOR(bio_dev(bio)),
(unsigned long long)bio->bi_iter.bi_sector);
ClearPageReclaim(page);
}
diff --git a/mm/page_owner.c b/mm/page_owner.c
index d80adfe..97ac8c7 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -26,7 +26,8 @@
depot_stack_handle_t handle;
};
-static bool page_owner_disabled = true;
+static bool page_owner_disabled =
+ !IS_ENABLED(CONFIG_PAGE_OWNER_ENABLE_DEFAULT);
DEFINE_STATIC_KEY_FALSE(page_owner_inited);
static depot_stack_handle_t dummy_handle;
@@ -43,6 +44,9 @@
if (strcmp(buf, "on") == 0)
page_owner_disabled = false;
+ if (strcmp(buf, "off") == 0)
+ page_owner_disabled = true;
+
return 0;
}
early_param("page_owner", early_page_owner_param);
diff --git a/mm/page_poison.c b/mm/page_poison.c
index aa2b3d3..ffa7c7c 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -7,7 +7,8 @@
#include <linux/poison.h>
#include <linux/ratelimit.h>
-static bool want_page_poisoning __read_mostly;
+static bool want_page_poisoning __read_mostly
+ = IS_ENABLED(CONFIG_PAGE_POISONING_ENABLE_DEFAULT);
static int __init early_page_poison_param(char *buf)
{
diff --git a/mm/showmem.c b/mm/showmem.c
new file mode 100644
index 0000000..62bea47
--- /dev/null
+++ b/mm/showmem.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+
+ATOMIC_NOTIFIER_HEAD(show_mem_notifier);
+
+int show_mem_notifier_register(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&show_mem_notifier, nb);
+}
+
+int show_mem_notifier_unregister(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&show_mem_notifier, nb);
+}
+
+void show_mem_call_notifiers(void)
+{
+ atomic_notifier_call_chain(&show_mem_notifier, 0, NULL);
+}
+
+static int show_mem_notifier_get(void *dat, u64 *val)
+{
+ show_mem_call_notifiers();
+ *val = 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(show_mem_notifier_debug_ops, show_mem_notifier_get,
+ NULL, "%llu\n");
+
+int show_mem_notifier_debugfs_register(void)
+{
+ debugfs_create_file("show_mem_notifier", 0664, NULL, NULL,
+ &show_mem_notifier_debug_ops);
+
+ return 0;
+}
+late_initcall(show_mem_notifier_debugfs_register);
diff --git a/mm/slub.c b/mm/slub.c
index 8da34a8..0fc48b4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -687,11 +687,21 @@
dump_stack();
}
+#ifdef CONFIG_SLUB_DEBUG_PANIC_ON
+static void slab_panic(const char *cause)
+{
+ panic("%s\n", cause);
+}
+#else
+static inline void slab_panic(const char *cause) {}
+#endif
+
void object_err(struct kmem_cache *s, struct page *page,
u8 *object, char *reason)
{
slab_bug(s, "%s", reason);
print_trailer(s, page, object);
+ slab_panic(reason);
}
static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
@@ -706,6 +716,7 @@
slab_bug(s, "%s", buf);
print_page_info(page);
dump_stack();
+ slab_panic("slab error");
}
static void init_object(struct kmem_cache *s, void *object, u8 val)
@@ -727,6 +738,7 @@
static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
void *from, void *to)
{
+ slab_panic("object poison overwritten");
slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
memset(from, data, to - from);
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c7ce2c1..0871a3c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2768,16 +2768,23 @@
}
} while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
+ /*
+ * Record the subtree's reclaim efficiency. The reclaimed
+ * pages from slab are excluded here because the corresponding
+ * scanned pages are not accounted. Moreover, freeing a page
+ * by slab shrinking depends on each slab's object population,
+ * making the cost model (i.e. scan:free) different from that
+ * of LRU.
+ */
+ vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
+ sc->nr_scanned - nr_scanned,
+ sc->nr_reclaimed - nr_reclaimed);
+
if (reclaim_state) {
sc->nr_reclaimed += reclaim_state->reclaimed_slab;
reclaim_state->reclaimed_slab = 0;
}
- /* Record the subtree's reclaim efficiency */
- vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
- sc->nr_scanned - nr_scanned,
- sc->nr_reclaimed - nr_reclaimed);
-
if (sc->nr_reclaimed - nr_reclaimed)
reclaimable = true;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8ba0870..f8df216 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1171,6 +1171,7 @@
/* enum vm_event_item counters */
"pgpgin",
"pgpgout",
+ "pgpgoutclean",
"pswpin",
"pswpout",
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 4791aa8..ab2f280 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1480,6 +1480,9 @@
(ut[i].family != prev_family))
return -EINVAL;
+ if (ut[i].mode >= XFRM_MODE_MAX)
+ return -EINVAL;
+
prev_family = ut[i].family;
switch (ut[i].family) {
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index e404924..5fb078a 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -186,7 +186,7 @@
err = stream->ops->pointer(stream, tstamp);
if (err)
return err;
- pr_debug("dsp consumed till %d total %d bytes\n",
+ pr_debug("dsp consumed till %d total %llu bytes\n",
tstamp->byte_offset, tstamp->copied_total);
if (stream->direction == SND_COMPRESS_PLAYBACK)
stream->runtime->total_bytes_transferred = tstamp->copied_total;